input
stringlengths
2.65k
237k
output
stringclasses
1 value
s=2, label='pathogenic missense (ClinVar)') plt.scatter(lof_clin_var_poses, lof_pathogenic_ys, color=COLOR_PALETTE['V'], s=2, label='pathogenic stop gained, frameshift (ClinVar)') create_legend = False else: plt.scatter(control_missense, control_ys, color=COLOR_PALETTE['B'], s=2) plt.scatter(miss_clin_var_poses, miss_pathogenic_ys, color='black', s=2) plt.scatter(lof_clin_var_poses, lof_pathogenic_ys, color=COLOR_PALETTE['V'], s=2) y += 1 ys = np.arange(-0.5, len(gene_names) + 1, 1) plt.xticks(fontsize=7) plt.yticks(ys, [''] + gene_names, fontsize=7) plt.ylim(0,y) ax.spines['left'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.tick_params( axis='y', # changes apply to the x-axis which='both', # both major and minor ticks are affected left=False) # labels along the bottom edge are off legend_elements = [Line2D([0], [0], marker='o', color='w', label='protein altering (gnomAD)', markerfacecolor=COLOR_PALETTE['B'], markersize=7), Line2D([0], [0], marker='o', color='w', label='pathogenic missense (ClinVar)', markerfacecolor='black', markersize=7), Line2D([0], [0], marker='o', color='w', label='pathogenic stop gained,\nframeshift (ClinVar)', markerfacecolor=COLOR_PALETTE['V'], markersize=7),] plt.legend(handles=legend_elements, loc='center right', frameon=False, fontsize=7) plt.xlabel('Protein length (amino acids)', fontsize=7) if single_figure: fig = plt.figure(1) fig.set_size_inches(6.4, 2.3) plt.tight_layout(rect=[0.01, -0.05, 1, 1.1]) fig_format = 'pdf' plt.savefig(FIGURES_FOLDER + 'ClinVar_vs_regions_example_genes.' 
+ fig_format, format=fig_format, dpi=300, transparent=True) def draw_clin_var_figure(db, length_ranges, groups_region_count, groups_region_miss, miss_percents, miss_per_positions, miss_per_position_cis, miss_per_position_raw, groups_region_lof, lof_percents, lof_per_positions, lof_per_position_cis, lof_per_position_raw, include_example_genes_subplot=False): miss_per_positions_str = [] for miss_per_position in miss_per_positions: miss_per_positions_str.append("%.3f" % round(miss_per_position,3)) lof_per_positions_str = [] for lof_per_position in lof_per_positions: lof_per_positions_str.append("%.3f" % round(lof_per_position,3)) multi_bar_labels = ['Missense', 'Stop gained, frameshift'] subplot_num = 1 if include_example_genes_subplot: row_num = 3 draw_example_gene_variants_subplot(db, subplot_num, subplot_letter='a', row_num=row_num) subplot_num += 2 subplot_letters = ['b', 'c', 'd', 'e'] else: row_num = 2 subplot_letters = ['a', 'b', 'c', 'd'] draw_region_count_subplot(subplot_num, length_ranges, groups_region_count, groups_region_count, ylabel='Variant Intolerant Regions\n(VIRs, %)', subplot_letter=subplot_letters[0], row_num=row_num) subplot_num += 1 draw_region_count_subplot(subplot_num, length_ranges, [miss_percents, lof_percents], [groups_region_miss, groups_region_lof], ylabel='Pathogenic variants (%)', multi_bar_labels=multi_bar_labels, subplot_letter=subplot_letters[1], row_num=row_num) subplot_num += 1 draw_region_count_subplot(subplot_num, length_ranges, [miss_per_positions, lof_per_positions], [miss_per_positions_str, lof_per_positions_str], cis=[miss_per_position_cis, lof_per_position_cis], raw=[miss_per_position_raw, lof_per_position_raw], ylabel='Pathogenic variants\nper amino acid', multi_bar_labels=multi_bar_labels, legend_loc='upper left', subplot_letter=subplot_letters[2], row_num=row_num) subplot_num += 1 draw_regions_gerp_whisker_figure(db, subplot_num, subplot_letter=subplot_letters[3], row_num=row_num) plt.figure(figsize = (2,2)) gs1 = 
gridspec.GridSpec(2, 2) gs1.update(wspace=0.05, hspace=0.05) fig = plt.figure(1) if include_example_genes_subplot: fig.set_size_inches(7.5, 6.5) else: fig.set_size_inches(7, 3.7) plt.tight_layout(rect=[0, 0, 1, 1]) plt.savefig(FIGURES_FOLDER + 'ClinVar_vs_regions.pdf', format='pdf', dpi=300) def get_transcript_id_to_chrom_dict(db): transcript_id_to_chrom = {} transcripts = db.exac.transcripts.find({}) for transcript in transcripts: transcript_id_to_chrom[transcript['transcript_id']] = transcript['chrom'] return transcript_id_to_chrom def remove_overlapping_boxplot_outliers(boxplot): for l in boxplot['fliers']: unique_ys = set() f_xs = [] f_ys = [] xs, ys = l.get_data() for i in range(0, len(ys)): y = ys[i] y = round(y, 2) if y not in unique_ys: f_xs.append(xs[i]) f_ys.append(ys[i]) unique_ys.add(y) l.set_data(f_xs, f_ys) def draw_regions_gerp_whisker_figure(db, subplot_num, subplot_letter='', row_num=3): ax = plt.subplot(row_num,2,subplot_num) transcript_id_to_chrom = get_transcript_id_to_chrom_dict(db) gnomad_transcript_ids = set([]) if INCLUDE_GNOMAD_OUTLIERS: gnomad_genes = db.gevir.gnomad_scores.find({ "canonical": True, "valid_transcript": True }) else: gnomad_genes = db.gevir.gnomad_scores.find({ "canonical": True, "valid_transcript": True, "no_issues": True }) db.gevir.temp.remove({'_id': 'region_length_gerp_raw_bins'}) unique_genes = set() unique_regions = 0 bin_names = ['1-5', '6-10', '11-15', '16-20', '21+'] bin_stats = OrderedDict() for bin_name in bin_names: bin_stats[bin_name] = [] regions = db.gevir[REGIONS_COLLECTION].find({"lenght": { "$gte": 1 }, "not_in_cds": False }) print 'Collecting regions GERP++ data' total_lines = regions.count() line_number = 0 bar = progressbar.ProgressBar(maxval=1.0).start() for region in regions: chrom = transcript_id_to_chrom[region['transcript_id']] coverage_threshold = MIN_AUTOSOMAL_COVERAGE if chrom == 'X' or chrom == 'Y': coverage_threshold = MIN_XY_COVERAGE # count only high covered regions if 
region['exome_coverage'] >= coverage_threshold: unique_regions += 1 unique_genes.add(region['transcript_id']) length = region['lenght'] gerp = region['gerp_mean'] if length >= 1 and length <= 5: bin_stats['1-5'].append(gerp) elif length >= 6 and length <= 10: bin_stats['6-10'].append(gerp) elif length >= 11 and length <= 15: bin_stats['11-15'].append(gerp) elif length >= 16 and length <= 20: bin_stats['16-20'].append(gerp) elif length >= 21: bin_stats['21+'].append(gerp) else: print 'unexpected length', length line_number += 1 bar.update((line_number + 0.0) / total_lines) bar.finish() print 'unique genes', len(unique_genes) print 'unique regions', unique_regions # Draw boxplots flierprops = dict(marker='o', markerfacecolor=COLOR_PALETTE['B'], markersize=2, markeredgecolor=COLOR_PALETTE['B']) xs = [] x = 0 boxplots = [] for bin_name, gerps in bin_stats.iteritems(): print bin_name, '| regions:', len(gerps) boxplot = plt.boxplot(gerps, positions=[x], widths=0.8, notch=False, showfliers=True, patch_artist=True, flierprops=flierprops) # Enhance the speed of PDF load remove_overlapping_boxplot_outliers(boxplot) boxplots.append(boxplot) x += 1 xs.append(x) # Colour boxplots for boxplot in boxplots: for patch in boxplot['boxes']: patch.set_facecolor(C_LIGHT_GRAY) #COLOR_PALETTE['B'] for patch in boxplot['medians']: patch.set_color(C_BLACK) # 'yellow' # Plot expected (median) and set axis ticks and labels if subplot_letter: ax.text(-0.13, 1.11, subplot_letter, transform=ax.transAxes, fontsize=8, fontweight='bold', va='top', ha='right') else: ax.text(-0.13, 1.11, SUBPLOT_LETTERS[subplot_num], transform=ax.transAxes, fontsize=8, fontweight='bold', va='top', ha='right') xs = list(xs) normal_xs = [-0.8] + xs + [max(xs) + 0.8] plt.xticks(xs, bin_names, fontsize=7) plt.yticks([-12, -9, -6, -3, 0, 3, 6], fontsize=7) plt.xticks(range(-1, len(xs), 1), [''] + bin_names) plt.ylabel('GERP++ (mean)', fontsize=7) plt.xlabel('VIR length bins', fontsize=7) 
ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) def draw_regions_gerp_figure(db, subplot_num, subplot_letter='', row_num=3): region_length_gerps_strs = db.gevir.temp.find_one({'_id': 'region_length_gerps'}) if not region_length_gerps_strs: gnomad_transcript_ids = set([]) if INCLUDE_GNOMAD_OUTLIERS: gnomad_genes = db.gevir.gnomad_scores.find({ "canonical": True, "valid_transcript": True }) else: gnomad_genes = db.gevir.gnomad_scores.find({ "canonical": True, "valid_transcript": True, "no_issues": True }) for gnomad_gene in gnomad_genes: gnomad_transcript_ids.add(gnomad_gene['_id']) region_length_gerps = {} regions = db.gevir[REGIONS_COLLECTION].find({"exome_coverage": { "$gte": 50.0 }, "lenght": { "$gte": 1 }}) total_lines = regions.count() line_number = 0 bar = progressbar.ProgressBar(maxval=1.0).start() for region in regions: if region['transcript_id'] not in gnomad_transcript_ids: line_number += 1 continue length = region['lenght'] if length > 20: length = 21 if length not in region_length_gerps: region_length_gerps[length] = [] region_length_gerps[length].append(region['gerp_mean']) line_number += 1 bar.update((line_number + 0.0) / total_lines) bar.finish() region_length_gerps_means = OrderedDict() region_length_gerps_sems = OrderedDict() for length in region_length_gerps: region_length_gerps_means[str(length)] = np.mean(region_length_gerps[length]) region_length_gerps_sems[str(length)] = stats.sem(region_length_gerps[length]) db.gevir.temp.insert({'_id': 'region_length_gerps', 'means': region_length_gerps_means, 'sems': region_length_gerps_sems }) region_length_gerps_strs = db.gevir.temp.find_one({'_id': 'region_length_gerps'}) region_length_gerps_means = region_length_gerps_strs['means'] region_length_gerps_sems = region_length_gerps_strs['sems'] xs = range(1, 22) ys = [] ax = plt.subplot(row_num,2,subplot_num) for x in xs: ys.append(region_length_gerps_means[str(x)]) if subplot_letter: ax.text(-0.08, 1.11, subplot_letter, 
transform=ax.transAxes, fontsize=8, fontweight='bold', va='top', ha='right') else: ax.text(-0.08, 1.11, SUBPLOT_LETTERS[subplot_num], transform=ax.transAxes, fontsize=8, fontweight='bold', va='top', ha='right') plt.errorbar(xs, ys, yerr=region_length_gerps_sems.values(), capsize=4, color=COLOR_PALETTE['B'], fmt='o', markersize=3) # plt.xticks(range(0, 20, 3) + [21], fontsize=7) plt.yticks(fontsize=7) plt.ylim(0,4) ax.set_xticklabels([str(n) for n in range(0, 20, 3)] + ['21+']) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) plt.ylabel('GERP++ (mean)', fontsize=7) plt.xlabel('VIR length', fontsize=7) ############################################################################# ### Figure 3, Supplementary Figures 2 and 4 : GeVIR vs gnomAD constraints ### ############################################################################# def calculate_cumulative_percentage(ranked_genes, metric_set, precise=True): metric_num = float(len(metric_set)) percentages = [] # Calculate cumulative percentage for each gene (more accurate, but requires longer time to run) if precise: genes_so_far = set([]) for gene in ranked_genes: genes_so_far.add(gene) percentages.append(len(genes_so_far & metric_set) / metric_num * 100) else: # Calculate cumulative percentage in bins for each % bins = [set(x) for x in np.array_split(ranked_genes, 100)] overlap_so_far = 0 for x in range(0, 100): overlap_so_far += len(bins[x] & metric_set) / metric_num * 100 percentages.append(overlap_so_far) return percentages def calculate_cumulative_f1_percentage(ranked_genes, ad_set, ar_set, precise=True): all_ad = len(ad_set) f1_percentiles = [] if precise: genes_so_far = set([]) for gene in ranked_genes: genes_so_far.add(gene) ad = len(ad_set & genes_so_far) ar = len(ar_set & genes_so_far) metrics = get_precision_recall_f1(ad, ar, all_ad) f1_percentiles.append(metrics[2]) else: ad_so_far = set([]) ar_so_far = set([]) bins = [set(x) for x in np.array_split(ranked_genes, 100)] for x in 
range(0, 100): ad_so_far |= bins[x] & ad_set ar_so_far |= bins[x] & ar_set ad = len(ad_so_far) ar = len(ar_so_far) metrics = get_precision_recall_f1(ad, ar, all_ad) f1_percentiles.append(metrics[2]) return f1_percentiles def calculate_similiarity_percentage(ranked_genes_1, ranked_genes_2): overlap_so_far = 0 ranked_genes_1_so_far = set([]) ranked_genes_2_so_far = set([]) percentages = [] bins_1 = [set(x) for x in np.array_split(ranked_genes_1, 100)] bins_2 = [set(x) for x in np.array_split(ranked_genes_2, 100)] for x in range(0, 100): ranked_genes_1_so_far |= bins_1[x] ranked_genes_2_so_far |= bins_2[x] overlap = len(ranked_genes_1_so_far & ranked_genes_2_so_far) / float(len(ranked_genes_1_so_far)) * 100 percentages.append(overlap) return percentages def draw_cumulative_percentage_subplot(subplot_num, scores_ys, title='', linestyle='-', report_auc=False, show_expected=False, report_peak=False, legend_loc='lower right', legend_order_reverse=True, precise=True): ax = plt.subplot(3,3,subplot_num) ax.set_title(title, loc='center', fontsize=7) ax.text(-0.25, 1.1, SUBPLOT_LETTERS[subplot_num], transform=ax.transAxes, fontsize=7, fontweight='bold', va='top', ha='right') if precise: points_num = len(scores_ys[scores_ys.keys()[0]]) xs_raw = range(0, points_num) points_num = float(points_num) xs = [] for x in xs_raw: xs.append(((x + 1) / points_num) * 100) else: xs = range(1,101) if report_peak: score_peaks = OrderedDict() for score_name, ys in scores_ys.iteritems(): max_score_y = 0 max_score_x = 0 if precise: for i in range(0, len(xs)): score_y = ys[i] x = xs[i] if score_y > max_score_y: max_score_y = score_y max_score_x = x else: for x in xs: score_y = ys[x-1] if score_y > max_score_y: max_score_y = score_y max_score_x = x score_peaks[score_name] = (max_score_x, max_score_y) scores_auc = OrderedDict() scores_xy = OrderedDict() scores_labels = OrderedDict() scores_colors = [] for score_name, ys in scores_ys.iteritems(): score_auc = sklearn_auc(xs, ys) / 100 
scores_auc[score_name] = sklearn_auc(xs, ys) scores_xy[score_name] = (xs, ys) if report_auc: label = score_name + '\nAUC: %.2f%%' % score_auc elif report_peak: max_score_x, max_score_y = score_peaks[score_name] scores_auc[score_name] = max_score_y label = score_name + '\nPeak: %.2f%%' % max_score_y + ' (F1), ' + ('%.2f%%' % max_score_x) + ' (Rank)' else: label = score_name scores_labels[score_name] = label if report_auc or report_peak: scores_auc = sort_dict_by_values(scores_auc, reverse=legend_order_reverse) for score_name in scores_auc: xs, ys = scores_xy[score_name] plt.plot(xs, ys, color=SCORE_COLORS[score_name], linestyle=linestyle, label=scores_labels[score_name], linewidth=1) if report_peak: max_score_x, max_score_y = score_peaks[score_name] plt.scatter([max_score_x], [max_score_y], s=9, color=SCORE_COLORS[score_name]) score_peaks scores_colors.append(SCORE_COLORS[score_name]) #if report_auc or report_peak: l = plt.legend(loc=legend_loc, frameon=False, fontsize=5, handlelength=1.0) for line in l.legendHandles: line.set_linewidth(2.0) if show_expected: plt.plot(xs, xs, '--', label='Expected', color=C_GRAY, linewidth=1) plt.xticks(range(0, 101, 10), fontsize=7) plt.yticks(fontsize=7) ax.set_xticklabels([str(n) for n in range(0, 110, 10)]) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) plt.ylabel('Cumulative percentage (%)', fontsize=7) plt.xlabel('Rank (%)', fontsize=7) # Percentiles class FoldEnrichmentAR(): def __init__(self): self.decile = 0 self.ar = 0 self.genes = 0 self.fe = 0.0 self.p_value = 0.0 def get_dictionary(self): dictionary = OrderedDict() dictionary['decile'] = self.decile dictionary['ar'] = self.ar dictionary['genes'] = self.genes dictionary['fe'] = self.fe dictionary['p_value'] = self.p_value return dictionary def draw_gaps_vs_miss_z_ad_ar_subplot(subplot_num, scores_data, metric_set, ylabel='', title='', linestyle='-', ax='', for_letter=False): print 'AR ANALYSIS' font_size = 7 total_ar = len(metric_set) 
total_genes = set([]) for score_set in scores_data[scores_data.keys()[0]]: total_genes |= score_set total_genes = len(total_genes) print 'AR', total_ar, 'TOTAL', total_genes n_bars = len(scores_data) xs = np.arange(len(scores_data.values()[0]))#range(0, n_bars) bar_width, x_paddings = bar_chart_get_bar_width_and_x_paddings(n_bars) metric_num = float(len(metric_set)) if metric_num == 0: metric_num = 1 if not ax: ax = plt.subplot(1,3,subplot_num) ax.set_title(title, loc='center', fontsize=font_size) if for_letter: ax.text(-0.11, 1.05, SUBPLOT_LETTERS[subplot_num], transform=ax.transAxes, fontsize=7, fontweight='bold', va='top', ha='right') else: ax.text(-0.2, 1.1, SUBPLOT_LETTERS[subplot_num], transform=ax.transAxes, fontsize=7, fontweight='bold', va='top', ha='right') ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) top_deciles = set([1,2]) mid_deciles = set([4,5,6]) last_deciles = set([8,9,10]) top_row = ['1, 2 (first 20%)'] mid_row = ['4, 5, 6 (mid 30%)'] last_row = ['8, 9, 10 (last 30%)'] score_fears = OrderedDict() ys_scores = [] for x in range(0, n_bars): score_name = scores_data.keys()[x] score_fears[score_name] = OrderedDict() ys_score = [] decile = 1 top_ar = set([]) top_all = set([]) mid_ar = set([]) mid_all = set([]) last_ar = set([]) last_all = set([]) for score_set in scores_data[score_name]: n = len(score_set & metric_set) p = n * 100 / metric_num ys_score.append(p) fear = FoldEnrichmentAR() fear.decile = decile fear.ar = n fear.genes = len(score_set) fear.fe, fear.p_value = fisher_exact([[fear.ar, fear.genes],[total_ar, total_genes]]) score_fears[score_name][decile] = fear if decile in top_deciles: top_ar |= score_set & metric_set top_all |= score_set if decile in mid_deciles: mid_ar |= score_set & metric_set mid_all |= score_set if decile in last_deciles: last_ar |= score_set & metric_set last_all |= score_set decile += 1 ys_scores.append(ys_score) top_ar_fe, top_ar_p_value = fisher_exact([[len(top_ar), 
len(top_all)],[total_ar, total_genes]]) mid_ar_fe, mid_ar_p_value = fisher_exact([[len(mid_ar), len(mid_all)],[total_ar, total_genes]]) last_ar_fe, last_ar_p_value = fisher_exact([[len(last_ar), len(last_all)],[total_ar, total_genes]]) top_row += [len(top_ar), len(top_all), top_ar_fe, top_ar_p_value] mid_row += [len(mid_ar), len(mid_all), mid_ar_fe, mid_ar_p_value] last_row += [len(last_ar), len(last_all), last_ar_fe, last_ar_p_value] if for_letter: plt.plot(xs, ys_score, linestyle=linestyle, color=SCORE_COLORS[score_name], label=score_name) # .replace('\n', ' ') plt.scatter(xs, ys_score, s=5, color=SCORE_COLORS[score_name]) else: plt.plot(xs,
# load the grain map if available if use_dct_path: grain_map_path = os.path.join(data_dir, '5_reconstruction', vol_file) else: grain_map_path = os.path.join(data_dir, vol_file) if os.path.exists(grain_map_path): with h5py.File(grain_map_path, 'r') as f: # because how matlab writes the data, we need to swap X and Z # axes in the DCT volume micro.set_grain_map(f['vol'][()].transpose(2, 1, 0), voxel_size) if verbose: print('loaded grain ids volume with shape: {}' ''.format(micro.get_grain_map().shape)) # load the mask if available if use_dct_path: mask_path = os.path.join(data_dir, '5_reconstruction', mask_file) else: mask_path = os.path.join(data_dir, mask_file) if os.path.exists(mask_path): try: with h5py.File(mask_path, 'r') as f: mask = f['vol'][()].transpose(2, 1, 0).astype(np.uint8) # check if mask shape needs to be zero padded if not mask.shape == micro.get_grain_map().shape: offset = np.array(micro.get_grain_map().shape) - np.array(mask.shape) padding = [(o // 2, o // 2) for o in offset] print('mask padding is {}'.format(padding)) mask = np.pad(mask, padding, mode='constant') print('now mask shape is {}'.format(mask.shape)) micro.set_mask(mask, voxel_size) except: # fallback on matlab format micro.set_mask(loadmat(mask_path)['vol'], voxel_size) if verbose: print('loaded mask volume with shape: {}'.format(micro.get_mask().shape)) return micro @staticmethod def from_legacy_h5(file_path, filename=None): """read a microstructure object from a HDF5 file created by pymicro until version 0.4.5. :param str file_path: the path to the file to read. :return: the new `Microstructure` instance created from the file. 
""" with h5py.File(file_path, 'r') as f: if filename is None: filename = f.attrs['microstructure_name'] micro = Microstructure(name=filename, overwrite_hdf5=True) if 'symmetry' in f['EnsembleData/CrystalStructure'].attrs: sym = f['EnsembleData/CrystalStructure'].attrs['symmetry'] parameters = f['EnsembleData/CrystalStructure/LatticeParameters'][()] micro.set_lattice(Lattice.from_symmetry(Symmetry.from_string(sym), parameters)) if 'data_dir' in f.attrs: micro.data_dir = f.attrs['data_dir'] # load feature data if 'R_vectors' in f['FeatureData']: print('some grains') avg_rods = f['FeatureData/R_vectors'][()] print(avg_rods.shape) if 'grain_ids' in f['FeatureData']: grain_ids = f['FeatureData/grain_ids'][()] else: grain_ids = range(1, 1 + avg_rods.shape[0]) if 'centers' in f['FeatureData']: centers = f['FeatureData/centers'][()] else: centers = np.zeros_like(avg_rods) # add all grains to the microstructure grain = micro.grains.row for i in range(avg_rods.shape[0]): grain['idnumber'] = grain_ids[i] grain['orientation'] = avg_rods[i, :] grain['center'] = centers[i] grain.append() micro.grains.flush() # load cell data if 'grain_ids' in f['CellData']: micro.set_grain_map(f['CellData/grain_ids'][()], f['CellData/grain_ids'].attrs['voxel_size']) micro.recompute_grain_bounding_boxes() micro.recompute_grain_volumes() if 'mask' in f['CellData']: micro.set_mask(f['CellData/mask'][()], f['CellData/mask'].attrs['voxel_size']) return micro @staticmethod def from_ebsd(file_path, roi=None, tol=5., min_ci=0.2): """"Create a microstructure from an EBSD scan. :param str file_path: the path to the file to read. :param list roi: a list of 4 integers in the form [x1, x2, y1, y2] to crop the EBSD scan. :param float tol: the misorientation angle tolerance to segment the grains (default is 5 degrees). :param float min_ci: minimum confidence index for a pixel to be a valid EBSD measurement. :return: a new instance of `Microstructure`. 
""" # Get name of file and create microstructure instance name = os.path.splitext(os.path.basename(file_path))[0] micro = Microstructure(name=name, autodelete=False, overwrite_hdf5=True) from pymicro.crystal.ebsd import OimScan # Read raw EBSD .h5 data file from OIM scan = OimScan.from_file(file_path) micro.set_phases(scan.phase_list) if roi: print('importing data from region {}'.format(roi)) scan.cols = roi[1] - roi[0] scan.rows = roi[3] - roi[2] scan.iq = scan.iq[roi[0]:roi[1], roi[2]:roi[3]] scan.ci = scan.ci[roi[0]:roi[1], roi[2]:roi[3]] scan.euler = scan.euler[roi[0]:roi[1], roi[2]:roi[3], :] # change the orientation reference frame to XYZ scan.change_orientation_reference_frame() iq = scan.iq ci = scan.ci euler = scan.euler mask = np.ones_like(iq) # segment the grains grain_ids = scan.segment_grains(tol=tol, min_ci=min_ci) voxel_size = np.array([scan.xStep, scan.yStep]) micro.set_grain_map(grain_ids, voxel_size) # add each array to the data file to the CellData image Group micro.add_field(gridname='CellData', fieldname='mask', array=mask, replace=True) micro.add_field(gridname='CellData', fieldname='iq', array=iq, replace=True) micro.add_field(gridname='CellData', fieldname='ci', array=ci, replace=True) micro.add_field(gridname='CellData', fieldname='euler', array=euler, replace=True) # Fill GrainDataTable grains = micro.grains.row grain_ids_list = np.unique(grain_ids).tolist() for gid in grain_ids_list: if gid == 0: continue progress = 100 * (1 + grain_ids_list.index(gid)) / len(grain_ids_list) print('creating new grains [{:.2f} %]: adding grain {:d}'.format( progress, gid), end='\r') # get the symmetry for this grain phase_grain = scan.phase[np.where(grain_ids == 1)] assert len(np.unique(phase_grain)) == 1 # all pixel of this grain must have the same phase id by # construction grain_phase_id = phase_grain[0] sym = scan.phase_list[grain_phase_id].get_symmetry() # compute the mean orientation for this grain euler_grain = scan.euler[np.where(grain_ids == gid)] 
#euler_grain = np.atleast_2d(euler_grain) # for one pixel grains euler_grain = euler_grain rods = Orientation.eu2ro(euler_grain) rods = np.atleast_2d(rods) # for one pixel grains o_tsl = Orientation.compute_mean_orientation(rods, symmetry=sym) grains['idnumber'] = gid grains['orientation'] = o_tsl.rod grains.append() micro.grains.flush() micro.recompute_grain_bounding_boxes() micro.recompute_grain_centers(verbose=False) micro.recompute_grain_volumes(verbose=False) micro.recompute_grain_bounding_boxes(verbose=False) micro.sync() return micro @staticmethod def merge_microstructures(micros, overlap, translation_offset=[0, 0, 0], plot=False): """Merge two `Microstructure` instances together. The function works for two microstructures with grain maps and an overlap between them. Temporarily `Microstructures` restricted to the overlap regions are created and grains are matched between the two based on a disorientation tolerance. .. note:: The two microstructure must have the same crystal lattice and the same voxel_size for this method to run. :param list micros: a list containing the two microstructures to merge. :param int overlap: the overlap to use. :param list translation_offset: a manual translation (in voxels) offset to add to the result. :param bool plot: a flag to plot some results. :return: a new `Microstructure` instance containing the merged microstructure. 
""" from scipy import ndimage # perform some sanity checks for i in range(2): if micros[i]._is_empty('grain_map'): raise ValueError('microstructure instance %s must have an ' 'associated grain_map attribute' % micros[i].get_sample_name()) if micros[0].get_lattice() != micros[1].get_lattice(): raise ValueError('both microstructure must have the same crystal ' 'lattice') lattice = micros[0].get_lattice() if micros[0].get_voxel_size() != micros[1].get_voxel_size(): raise ValueError('both microstructure must have the same' ' voxel size') voxel_size = micros[0].get_voxel_size() if len(micros[0].get_grain_map().shape) == 2: raise ValueError('Microstructures to merge must be tridimensional') if len(micros[1].get_grain_map().shape) == 2: raise ValueError('Microstructures to merge must be tridimensional') # create two microstructures for the two overlapping regions: # end slices in first scan and first slices in second scan micro1_ol = micros[0].crop(z_start=micros[0].get_grain_map().shape[2] - overlap, autodelete=True) micro2_ol = micros[1].crop(z_end=overlap, autodelete=True) micros_ol = [micro1_ol, micro2_ol] # match grain from micros_ol[1] to micros_ol[0] (the reference) matched, _, unmatched = micros_ol[0].match_grains(micros_ol[1], verbose=True) # to find the translation, we compute the differences in coordinates of # the center of mass of the matched grains between the two microstructures translation_mm = np.zeros(3) for i in range(len(matched)): # look at the pair of grains match = matched[i] delta = (micros_ol[0].get_grain(match[0]).center - micros_ol[1].get_grain(match[1]).center) translation_mm += delta translation_mm /= len(matched) # account for the origin of the overlap region translation_mm[2] += (micros[0].get_grain_map().shape[2] - overlap) * voxel_size print('average shift (voxels): {}'.format(translation_mm / voxel_size)) translation_voxel = (translation_mm / voxel_size).astype(int) print('translation is in mm: {}'.format(translation_mm)) 
print('translation is in voxels {}'.format(translation_voxel)) # manually correct the result if necessary translation_voxel += translation_offset # now delete overlapping microstructures del micro1_ol, micro2_ol # look at ids in the reference volume ids_ref = np.unique(micros[0].get_grain_map()) ids_ref_list = ids_ref.tolist() if -1 in ids_ref_list: ids_ref_list.remove(-1) # grain overlap if 0 in ids_ref_list: ids_ref_list.remove(0) # background print(ids_ref_list) id_offset = max(ids_ref_list) print('grain ids in volume %s will be offset by %d' % (micros[1].get_sample_name(), id_offset)) # gather ids in the merging volume (will be modified) ids_mrg = np.unique(micros[1].get_grain_map()) ids_mrg_list = ids_mrg.tolist() if -1 in ids_mrg_list: ids_mrg_list.remove(-1) # grain overlap if 0 in ids_mrg_list: ids_mrg_list.remove(0) # background print(ids_mrg_list) # prepare a volume with the same size as the second grain map, # with grain ids renumbered and (X, Y) translations applied. grain_map = micros[1].get_grain_map(as_numpy=True) grain_map_translated = grain_map.copy() print('renumbering grains in the overlap region of volume %s' % micros[1].get_sample_name()) for match in matched: ref_id, other_id = match #TODO get this done faster (could be factorized in the method renumber_grains print('replacing %d by %d' % (other_id, ref_id)) grain_map_translated[grain_map == other_id] = ref_id try: ids_mrg_list.remove(other_id) except ValueError: # this can happen if a reference grain was matched to more than 1 grain print('%d was not in list anymore' % other_id) # also renumber the rest using the offset renumbered_grains = [] for i, other_id in enumerate(ids_mrg_list): new_id = id_offset + i + 1 grain_map_translated[grain_map == other_id] = new_id print('replacing %d by %d' % (other_id, new_id)) renumbered_grains.append([other_id, new_id]) # apply translation along the (X, Y) axes grain_map_translated = np.roll(grain_map_translated, translation_voxel[:2], (0, 1)) check = 
overlap // 2 print(grain_map_translated.shape) print(overlap) print(translation_voxel[2] + check) if plot: slice_ref = micros[0].get_grain_map()[:, :, translation_voxel[2] + check] slice_renum = grain_map_translated[:, :, check] id_max = max(slice_ref.max(), slice_renum.max()) fig = plt.figure(figsize=(15, 7)) ax1 = fig.add_subplot(1, 3, 1) ax1.imshow(micros[0].get_grain_map()[:, :,
2.63141], [1562500, 2, 0, 8, 0, 2.898], [1568000, 8, 0, 3, 2, 2.24168], [1572864, 19, 1, 0, 0, 2.00201], [1574640, 4, 9, 1, 0, 2.48954], [1575000, 3, 2, 5, 1, 2.846], [1580544, 9, 2, 0, 3, 2.65], [1587600, 4, 4, 2, 2, 2.40683], [1594323, 0, 13, 0, 0, 20.2312], [1600000, 9, 0, 5, 0, 2.39023], [1605632, 15, 0, 0, 2, 1.95452], [1607445, 0, 8, 1, 2, 18.3052], [1612800, 10, 2, 2, 1, 2.45997], [1613472, 5, 1, 0, 5, 2.96501], [1620000, 5, 4, 4, 0, 2.14727], [1620675, 0, 3, 2, 4, 19.0948], [1632960, 6, 6, 1, 1, 2.64216], [1638400, 16, 0, 2, 0, 1.91255], [1640250, 1, 8, 3, 0, 22.8526], [1640625, 0, 1, 7, 1, 18.9036], [1646400, 6, 1, 2, 3, 2.79919], [1647086, 1, 0, 0, 7, 18.9253], [1653372, 2, 10, 0, 1, 3.0677], [1653750, 1, 3, 4, 2, 18.9215], [1658880, 12, 4, 1, 0, 2.17512], [1666980, 2, 5, 1, 3, 3.36083], [1679616, 8, 8, 0, 0, 1.95226], [1680000, 7, 1, 4, 1, 2.60838], [1680700, 2, 0, 2, 5, 3.31414], [1687500, 2, 3, 6, 0, 3.12249], [1693440, 8, 3, 1, 2, 2.59195], [1701000, 3, 5, 3, 1, 2.82602], [1714608, 4, 7, 0, 2, 2.64372], [1715000, 3, 0, 4, 3, 3.03744], [1720320, 14, 1, 1, 1, 2.66598], [1728000, 9, 3, 3, 0, 2.61832], [1728720, 4, 2, 1, 4, 3.2197], [1741824, 10, 5, 0, 1, 2.58518], [1749600, 5, 7, 2, 0, 2.37168], [1750000, 4, 0, 6, 1, 2.93982], [1750329, 0, 6, 0, 4, 19.8886], [1756160, 10, 0, 1, 3, 2.94948], [1764000, 5, 2, 3, 2, 2.87406], [1764735, 0, 1, 1, 6, 19.9018], [1769472, 16, 3, 0, 0, 2.07878], [1771470, 1, 11, 1, 0, 25.9569], [1771875, 0, 4, 5, 1, 23.2933], [1778112, 6, 4, 0, 3, 2.64475], [1786050, 1, 6, 2, 2, 23.316], [1792000, 11, 0, 3, 1, 2.65161], [1800000, 6, 2, 5, 0, 2.68621], [1800750, 1, 1, 3, 4, 24.6166], [1806336, 12, 2, 0, 2, 2.56173], [1814400, 7, 4, 2, 1, 2.43883], [1815156, 2, 3, 0, 5, 3.5635], [1822500, 2, 6, 4, 0, 3.08974], [1835008, 18, 0, 0, 1, 2.32361], [1837080, 3, 8, 1, 1, 3.10818], [1837500, 2, 1, 5, 2, 3.6174], [1843200, 13, 2, 2, 0, 2.33296], [1843968, 8, 1, 0, 4, 2.99649], [1852200, 3, 3, 2, 3, 3.30647], [1866240, 9, 6, 1, 0, 2.95531], 
[1875000, 3, 1, 7, 0, 3.31635], [1881600, 9, 1, 2, 2, 3.15084], [1882384, 4, 0, 0, 6, 3.3193], [1889568, 5, 10, 0, 0, 2.74542], [1890000, 4, 3, 4, 1, 3.0279], [1905120, 5, 5, 1, 2, 3.15232], [1913625, 0, 7, 3, 1, 24.4047], [1920000, 10, 1, 4, 0, 2.91019], [1920800, 5, 0, 2, 4, 3.04262], [1928934, 1, 9, 0, 2, 24.7808], [1929375, 0, 2, 4, 3, 24.7398], [1935360, 11, 3, 1, 1, 3.00795], [1944000, 6, 5, 3, 0, 2.7964], [1944810, 1, 4, 1, 4, 25.1367], [1953125, 0, 0, 9, 0, 25.1488], [1959552, 7, 7, 0, 1, 2.69464], [1960000, 6, 0, 4, 2, 2.84884], [1966080, 17, 1, 1, 0, 2.86164], [1968300, 2, 9, 2, 0, 3.25838], [1968750, 1, 2, 6, 1, 26.9819], [1975680, 7, 2, 1, 3, 3.35392], [1984500, 2, 4, 3, 2, 3.59494], [1990656, 13, 5, 0, 0, 2.77804], [2000000, 7, 0, 6, 0, 2.87052], [2000376, 3, 6, 0, 3, 3.60705], [2007040, 13, 0, 1, 2, 2.83776], [2016000, 8, 2, 3, 1, 3.06563], [2016840, 3, 1, 1, 5, 4.27889], [2025000, 3, 4, 5, 0, 3.19659], [2032128, 9, 4, 0, 2, 2.95949], [2041200, 4, 6, 2, 1, 3.31987], [2048000, 14, 0, 3, 0, 2.69334], [2058000, 4, 1, 3, 3, 3.88985], [2064384, 15, 2, 0, 1, 2.69388], [2066715, 0, 10, 1, 1, 22.9791], [2073600, 10, 4, 2, 0, 2.71034], [2074464, 5, 3, 0, 4, 3.35579], [2083725, 0, 5, 2, 3, 23.1837], [2097152, 21, 0, 0, 0, 2.65724], [2099520, 6, 8, 1, 0, 2.8658], [2100000, 5, 1, 5, 1, 3.61687], [2100875, 0, 0, 3, 5, 25.3883], [2107392, 11, 1, 0, 3, 3.50671], [2109375, 0, 3, 7, 0, 25.3964], [2116800, 6, 3, 2, 2, 3.14152], [2117682, 1, 2, 0, 6, 25.3967], [2125764, 2, 12, 0, 0, 3.25688], [2126250, 1, 5, 4, 1, 26.125], [2143260, 2, 7, 1, 2, 4.08617], [2143750, 1, 0, 5, 3, 26.1215], [2150400, 12, 1, 2, 1, 3.25176], [2151296, 7, 0, 0, 5, 3.43673], [2160000, 7, 3, 4, 0, 2.8559], [2160900, 2, 2, 2, 4, 4.23144], [2177280, 8, 5, 1, 1, 3.38441], [2187000, 3, 7, 3, 0, 3.48231], [2187500, 2, 0, 7, 1, 4.12085], [2195200, 8, 0, 2, 3, 3.23133], [2204496, 4, 9, 0, 1, 3.43431], [2205000, 3, 2, 4, 2, 3.88702], [2211840, 14, 3, 1, 0, 2.94106], [2222640, 4, 4, 1, 3, 3.89115], 
[2239488, 10, 7, 0, 0, 2.99942], [2240000, 9, 0, 4, 1, 3.42502], [2250000, 4, 2, 6, 0, 3.78082], [2250423, 0, 8, 0, 3, 30.5133], [2257920, 10, 2, 1, 2, 3.74315], [2268000, 5, 4, 3, 1, 3.42085], [2268945, 0, 3, 1, 5, 30.5573], [2278125, 0, 6, 5, 0, 30.5391], [2286144, 6, 6, 0, 2, 3.45572], [2293760, 16, 0, 1, 1, 3.23417], [2296350, 1, 8, 2, 1, 30.5514], [2296875, 0, 1, 6, 2, 30.5431], [2304000, 11, 2, 3, 0, 3.37274], [2304960, 6, 1, 1, 4, 4.22506], [2315250, 1, 3, 3, 3, 29.0076], [2322432, 12, 4, 0, 1, 3.01162], [2332800, 7, 6, 2, 0, 3.16754], [2333772, 2, 5, 0, 4, 4.42748], [2343750, 1, 1, 8, 0, 38.6922], [2352000, 7, 1, 3, 2, 3.82671], [2352980, 2, 0, 1, 6, 4.85092], [2359296, 18, 2, 0, 0, 2.96004], [2361960, 3, 10, 1, 0, 3.96747], [2362500, 2, 3, 5, 1, 4.43857], [2370816, 8, 3, 0, 3, 3.55613], [2381400, 3, 5, 2, 2, 3.92434], [2400000, 8, 1, 5, 0, 3.62478], [2401000, 3, 0, 3, 4, 4.25762], [2408448, 14, 1, 0, 2, 3.40682], [2419200, 9, 3, 2, 1, 3.74443], [2420208, 4, 2, 0, 5, 4.483], [2430000, 4, 5, 4, 0, 3.66097], [2449440, 5, 7, 1, 1, 3.90851], [2450000, 4, 0, 5, 2, 4.02967], [2457600, 15, 1, 2, 0, 3.23633], [2458624, 10, 0, 0, 4, 3.83817], [2460375, 0, 9, 3, 0, 34.4254], [2469600, 5, 2, 2, 3, 4.13356], [2470629, 0, 1, 0, 7, 34.4493], [2480058, 1, 11, 0, 1, 34.4467], [2480625, 0, 4, 4, 2, 34.4368], [2488320, 11, 5, 1, 0, 3.73755], [2500000, 5, 0, 7, 0, 3.6721], [2500470, 1, 6, 1, 3, 30.4162], [2508800, 11, 0, 2, 2, 3.55415], [2519424, 7, 9, 0, 0, 3.246], [2520000, 6, 2, 4, 1, 3.86775], [2521050, 1, 1, 2, 5, 38.1393], [2531250, 1, 4, 6, 0, 38.156], [2540160, 7, 4, 1, 2, 3.77703], [2551500, 2, 6, 3, 1, 4.83121], [2560000, 12, 0, 4, 0, 3.22759], [2571912, 3, 8, 0, 2, 4.0351], [2572500, 2, 1, 4, 3, 5.2924], [2580480, 13, 2, 1, 1, 3.88666], [2592000, 8, 4, 3, 0, 3.33906], [2593080, 3, 3, 1, 4, 4.952], [2612736, 9, 6, 0, 1, 4.12244], [2621440, 19, 0, 1, 0, 3.33425], [2624400, 4, 8, 2, 0, 3.61666], [2625000, 3, 1, 6, 1, 5.00895], [2634240, 9, 1, 1, 3, 5.00876], [2646000, 
4, 3, 3, 2, 4.44579], [2654208, 15, 4, 0, 0, 3.06625], [2657205, 0, 12, 1, 0, 34.8656], [2667168, 5, 5, 0, 3, 4.34164], [2679075, 0, 7, 2, 2, 34.879], [2688000, 10, 1, 3, 1, 4.56847], [2689120, 5, 0, 1, 5, 4.91318], [2700000, 5, 3, 5, 0, 4.05037], [2701125, 0, 2, 3, 4, 39.3998], [2709504, 11, 3, 0, 2, 3.93133], [2721600, 6, 5, 2, 1, 4.06101], [2722734, 1, 4, 0, 5, 39.3544], [2733750, 1, 7, 4, 0, 39.3733], [2734375, 0, 0, 8, 1, 31.258], [2744000, 6, 0, 3, 3, 4.4422], [2752512, 17, 1, 0, 1, 3.91522], [2755620, 2, 9, 1, 1, 5.24336], [2756250, 1, 2, 5, 2, 31.2609], [2764800, 12, 3, 2, 0, 3.57748], [2765952, 7, 2, 0, 4, 4.37877], [2778300, 2, 4, 2, 3, 5.12567], [2799360, 8, 7, 1, 0, 3.87615], [2800000, 7, 0, 5, 1, 4.15894], [2809856, 13, 0, 0, 3, 3.92407], [2812500, 2, 2, 7, 0, 5.24209], [2822400, 8, 2, 2, 2, 4.12453], [2823576, 3, 1, 0, 6, 5.68644], [2834352, 4, 11, 0, 0, 3.99261], [2835000, 3, 4, 4, 1, 4.62375], [2857680, 4, 6, 1, 2, 5.03485], [2867200, 14, 0, 2, 1, 3.66649], [2880000, 9, 2, 4, 0, 4.31902], [2881200, 4, 1, 2, 4, 5.31633], [2893401, 0, 10, 0, 2, 38.4548], [2903040, 10, 4, 1, 1, 4.49252], [2916000, 5, 6, 3, 0, 4.46507], [2917215, 0, 5, 1, 4, 37.2707], [2939328, 6, 8, 0, 1, 3.98326], [2940000, 5, 1, 4, 2, 4.89405], [2941225, 0, 0, 2, 6, 37.2932], [2949120, 16, 2, 1, 0, 4.12249], [2952450, 1, 10, 2, 0, 42.4574], [2953125, 0, 3, 6, 1, 34.3273], [2963520, 6, 3, 1, 3, 5.10796], [2976750, 1, 5, 3, 2, 34.3477], [2985984, 12, 6, 0, 0, 3.95205], [3000000, 6, 1, 6, 0, 4.95559], [3000564, 2, 7, 0, 3, 5.6], [3001250, 1, 0, 4, 4, 43.8], [3010560, 12, 1, 1, 2, 4.9922], [3024000, 7, 3, 3, 1, 4.56308], [3025260, 2, 2, 1, 5, 6.60685], [3037500, 2, 5, 5, 0, 5.51438], [3048192, 8, 5, 0, 2, 4.30672], [3061800, 3, 7, 2, 1, 5.07325], [3062500, 2, 0, 6, 2, 6.09191], [3072000, 13, 1, 3, 0, 4.73401], [3073280, 8, 0, 1, 4, 4.95228], [3087000, 3, 2, 3, 3, 5.92081], [3096576, 14, 3, 0, 1, 4.05237], [3110400, 9, 5, 2, 0, 4.61061], [3111696, 4, 4, 0, 4, 5.08254], [3125000, 3, 0, 8, 
0, 5.20827], [3136000, 9, 0, 3, 2, 5.05717], [3145728, 20, 1, 0, 0, 3.99286], [3149280, 5, 9, 1, 0, 4.80978], [3150000, 4, 2, 5, 1, 5.50203], [3161088, 10, 2, 0, 3, 5.23909], [3175200, 5, 4, 2, 2, 4.61965], [3176523, 0, 3, 0, 6,
# ---------------------------------------------------------------------------
# Echonet Lite constant tables (reconstructed).
#
# NOTE(review): this region of the source was a whitespace-mangled paste (the
# original newlines were collapsed, leaving unparseable Python); it has been
# re-flowed here.  The dict literals below appeared in the source as *unbound*
# bare expressions; they are bound to module-level names so they are usable.
# All flat UPPER_SNAKE constants are reproduced with their original names and
# values, including the original spellings CC_TEMPERTURE_SENSOR and
# CC_WEIGHTING_MACHINE.
# ---------------------------------------------------------------------------

# Echonet Lite Class Group Code (EOJ): group name -> code.
CLASS_GROUP_CODES = {
    'CGC_SENSOR_RELATED': 0x00,
    'CGC_AIR_CONDITIONER_RELATED': 0x01,
    'CGC_HOUSING_RELATED': 0x02,
    'CGC_COOKING_RELATED': 0x03,
    'CGC_HEALTH_RELATED': 0x04,
    'CGC_MANAGEMENT_RELATED': 0x05,
    'CGC_AV_RELATED': 0x06,
    'CGC_PROFILE_CLASS': 0x0E,
    'CGC_USER_DEFINITION_CLASS': 0x0F,
}

# Echonet Lite Class Group Code (EOJ) -- flat constants.
CGC_SENSOR_RELATED = 0x00
CGC_AIR_CONDITIONER_RELATED = 0x01
CGC_HOUSING_RELATED = 0x02
CGC_COOKING_RELATED = 0x03
CGC_HEALTH_RELATED = 0x04
CGC_MANAGEMENT_RELATED = 0x05
CGC_AV_RELATED = 0x06
CGC_PROFILE_CLASS = 0x0E
CGC_USER_DEFINITION_CLASS = 0x0F

# Echonet Lite Class Code (EOJ): class group name -> {class name -> code}.
CLASS_CODES = {
    "CGC_SENSOR_RELATED": {
        # Class Group Code = 0x00 -- sensor-related device class group
        'CC_GAS_LEAK_SENSOR': 0x01, 'CC_CRIME_PREVENTION_SENSOR': 0x02,
        'CC_EMERGENCY_BUTTON': 0x03, 'CC_FIRST_AID_SENSOR': 0x04,
        'CC_EARTHQUAKE_SENSOR': 0x05, 'CC_ELECTRIC_LEAK_SENSOR': 0x06,
        'CC_HUMAN_DETECTION_SENSOR': 0x07, 'CC_VISITOR_SENSOR': 0x08,
        'CC_CALL_SENSOR': 0x09, 'CC_CONDENSATION_SENSOR': 0x0A,
        'CC_AIR_POLLUTION_SENSOR': 0x0B, 'CC_OXYGEN_SENSOR': 0x0C,
        'CC_ILLUMINANCE_SENSOR': 0x0D, 'CC_SOUND_SENSOR': 0x0E,
        'CC_MAILING_SENSOR': 0x0F, 'CC_WEIGHT_SENSOR': 0x10,
        'CC_TEMPERTURE_SENSOR': 0x11, 'CC_HUMIDITY_SENSOR': 0x12,
        'CC_RAIN_SENSOR': 0x13, 'CC_WATER_LEVEL_SENSOR': 0x14,
        'CC_BATH_WATER_LEVEL_SENSOR': 0x15, 'CC_BATH_HEATING_STATUS_SENSOR': 0x16,
        'CC_WATER_LEAK_SENSOR': 0x17, 'CC_WATER_OVERFLOW_SENSOR': 0x18,
        'CC_FIRE_SENSOR': 0x19, 'CC_CIGARETTE_SMOKE_SENSOR': 0x1A,
        'CC_CO2_SENSOR': 0x1B, 'CC_GAS_SENSOR': 0x1C,
        'CC_VOC_SENSOR': 0x1D, 'CC_DIFFERENTIAL_PRESSURE_SENSOR': 0x1E,
        'CC_AIR_SPEED_SENSOR': 0x1F, 'CC_ODOR_SENSOR': 0x20,
        'CC_FLAME_SENSOR': 0x21, 'CC_ELECTRIC_ENERGY_SENSOR': 0x22,
        'CC_CURRENT_VALUE_SENSOR': 0x23, 'CC_DAYLIGHT_SENSOR': 0x24,
        'CC_WATER_FLOW_RATE_SENSOR': 0x25, 'CC_MICROMOTION_SENSOR': 0x26,
        'CC_PASSAGE_SENSOR': 0x27, 'CC_BED_PRESENCE_SENSOR': 0x28,
        'CC_OPEN_CLOSE_SENSOR': 0x29, 'CC_ACTIVITY_AMOUNT_SENSOR': 0x2A,
        'CC_HUMAN_BODY_LOCATION_SENSOR': 0x2B, 'CC_SNOW_SENSOR': 0x2C,
        'CC_AIR_PRESSURE_SENSOR': 0x2D,
    },
    "CGC_AIR_CONDITIONER_RELATED": {
        # Class Group Code = 0x01 -- air-conditioner device class group
        'CC_HOME_AIR_CONDITIONER': 0x30, 'CC_COLD_BLASTER': 0x31,
        'CC_ELECTRIC_FAN': 0x32, 'CC_VENTILATION_FAN': 0x33,
        'CC_AIR_CONDITIONER_VENTILATION_FAN': 0x34, 'CC_AIR_CLEANER': 0x35,
        'CC_COLD_BLAST_FAN': 0x36, 'CC_CIRCULATOR': 0x37,
        'CC_DEHUMIDIFIER': 0x38, 'CC_HUMIDIFIER': 0x39,
        'CC_CEILING_FAN': 0x3A, 'CC_ELECTRIC_KOTATSU': 0x3B,
        'CC_ELECTRIC_HEATING_PAD': 0x3C, 'CC_ELECTRIC_BLANKET': 0x3D,
        'CC_SPACE_HEATER': 0x3E, 'CC_PANEL_HEATER': 0x3F,
        'CC_ELECTRIC_CARPET': 0x40, 'CC_FLOOR_HEATER_0x01': 0x41,
        'CC_ELECTRIC_HEATER': 0x42, 'CC_FAN_HEATER': 0x43,
        'CC_BATTERY_CHARGER': 0x44,
        'CC_PACKAGE_TYPE_COMMERCIAL_AIRCOND_INDOOR': 0x45,
        'CC_PACKAGE_TYPE_COMMERCIAL_AIRCOND_OUTDOOR': 0x46,
        'CC_PACKAGE_TYPE_COMMERCIAL_AIRCOND_THERMAL': 0x47,
        'CC_COMMERCIAL_FAN_COIL_UNIT': 0x48,
        'CC_COMMERCIAL_AIRCOND_COLD_SOURCE_CHILLER': 0x49,
        'CC_COMMERCIAL_AIRCOND_HOT_SOURCE_BOILER': 0x50,
        'CC_AIRCOND_VAV_FOR_COMMERCIAL_APPLICATIONS': 0x51,
        'CC_AIRCOND_FOR_COMMERCIAL_APPLICATIONS': 0x52,
        'CC_UNIT_COOLER': 0x53,
        'CC_CONDENSING_UNIT_FOR_COMMERCIAL_APP': 0x54,
        'CC_ELECTRIC_STORAGE_HEATER': 0x55,
    },
    "CGC_HOUSING_RELATED": {
        # Class Group Code = 0x02 -- housing/facility device class group
        'CC_ELECTRICALLY_OPERATED_BLIND': 0x60,
        'CC_ELECTRICALLY_OPERATED_SHUTTER': 0x61,
        'CC_ELECTRICALLY_OPERATED_CURTAIN': 0x62,
        'CC_ELECTRICALLY_OPERATED_RAIN_SLIDING_DOOR': 0x63,
        'CC_ELECTRICALLY_OPERATED_GATE': 0x64,
        'CC_ELECTRICALLY_OPERATED_WINDOW': 0x65,
        'CC_AUTOMATICALLY_OPERATED_ENTRANCE_DOOR': 0x66,
        'CC_GARDEN_SPRINKLER': 0x67, 'CC_FIRE_SPRINKLER': 0x68,
        'CC_FOUNTAIN': 0x69, 'CC_INSTANTANEOUS_WATER_HEATER': 0x6A,
        'CC_ELECTRIC_WATER_HEATER': 0x6B, 'CC_SOLAR_WATER_HEATER': 0x6C,
        'CC_CIRCULATION_PUMP': 0x6D, 'CC_BIDET_EQUIPPED_TOILET': 0x6E,
        'CC_ELECTRIC_LOCK': 0x6F, 'CC_GAS_LINE_VALVE': 0x70,
        'CC_HOME_SAUNA': 0x71, 'CC_HOT_WATER_GENERATOR': 0x72,
        'CC_BATHROOM_DRYER': 0x73, 'CC_HOME_ELEVATOR': 0x74,
        'CC_ELECTRICALLY_OPERATED_ROOM_DIVIDER': 0x75,
        'CC_HORIZONTAL_TRANSFER': 0x76,
        'CC_ELECTRICALLY_OPERATED_CLOTH_DRYING_POLE': 0x77,
        'CC_SEPTIC_TANK': 0x78, 'CC_HOME_SOLAR_POWER_GENERATION': 0x79,
        'CC_COLD_HOT_WATER_HEAT_SOURCE_EQUIPMENT': 0x7A,
        'CC_FLOOR_HEATER_0x02': 0x7B, 'CC_FUEL_CELL': 0x7C,
        'CC_STORAGE_BATTERY': 0x7D,
        'CC_ELECTRIC_VEHICLE_CHARGER_DISCHARGER': 0x7E,
        'CC_ENGINE_COGENERATION': 0x7F, 'CC_ELECTRIC_ENERGY_METER': 0x80,
        'CC_WATER_FLOW_METER': 0x81, 'CC_GAS_METER': 0x82,
        'CC_LP_GAS_METER': 0x83, 'CC_CLOCK': 0x84,
        'CC_AUTOMATIC_DOOR': 0x85, 'CC_COMMERCIAL_ELEVATOR': 0x86,
        'CC_DISTRIBUTION_PANEL_METERING': 0x87,
        'CC_LOW_VOLTAGE_SMART_ELECTRIC_ENERGY_METER': 0x88,
        'CC_SMART_GAS_METER': 0x89,
        'CC_HIGH_VOLTAGE_SMART_ELECTRIC_ENERGY_METER': 0x8A,
        'CC_GENERAL_LIGHTING_CLASS': 0x90,
        'CC_SINGLE_FUNCTION_LIGHTING': 0x91,
        'CC_EMERGENCY_LIGHTING': 0x99, 'CC_EQUIPMENT_LIGHT': 0x9D,
        'CC_BUZZER': 0xA0,
    },
    "CGC_COOKING_RELATED": {
        # Class Group Code = 0x03 -- cooking/household-related device class group
        'CC_COFFEE_MACHINE': 0xB0, 'CC_COFFEE_MILL': 0xB1,
        'CC_ELECTRIC_HOT_WATER_POT': 0xB2, 'CC_ELECTRIC_STOVE': 0xB3,
        'CC_TOASTER': 0xB4, 'CC_JUICER_FOOD_MIXER': 0xB5,
        'CC_FOOD_PROCESSOR': 0xB6, 'CC_REFRIGERATOR': 0xB7,
        'CC_COMBINATION_MICROWAVE_OVEN': 0xB8, 'CC_COOKING_HEATER': 0xB9,
        'CC_OVEN': 0xBA, 'CC_RICE_COOKER': 0xBB,
        'CC_ELECTRONIC_JAR': 0xBC, 'CC_DISH_WASHER': 0xBD,
        'CC_DISH_DRYER': 0xBE, 'CC_ELECTRIC_RICE_CARD_COOKER': 0xBF,
        'CC_KEEP_WARM_MACHINE': 0xC0, 'CC_RICE_MILL': 0xC1,
        'CC_AUTOMATIC_BREAD_COOKER': 0xC2, 'CC_SLOW_COOKER': 0xC3,
        'CC_ELECTRIC_PICKLES_COOKER': 0xC4, 'CC_WASHING_MACHINE': 0xC5,
        'CC_CLOTHES_DRYER': 0xC6, 'CC_ELECTRIC_IRON': 0xC7,
        'CC_TROUSER_PRESS': 0xC8, 'CC_FUTON_DRYER': 0xC9,
        'CC_SMALL_ARTICLE_SHOES_DRYER': 0xCA,
        'CC_ELECTRIC_VACUUM_CLEANER': 0xCB, 'CC_DISPOSER': 0xCC,
        'CC_ELECTRIC_MOSQUITO_CATCHER': 0xCD,
        'CC_COMMERCIAL_SHOW_CASE': 0xCE, 'CC_COMMERCIAL_REFRIGERATOR': 0xCF,
        'CC_COMMERCIAL_HOT_CASE': 0xD0, 'CC_COMMERCIAL_FRYER': 0xD1,
        'CC_COMMERCIAL_MICROWAVE_OVEN': 0xD2, 'CC_WASHER_AND_DRYER': 0xD3,
        'CC_COMMERCIAL_SHOW_CASE_OUTDOOR_UNIT': 0xD4,
    },
    "CGC_HEALTH_RELATED": {
        # Class Group Code = 0x04 -- health-related device class group
        'CC_WEIGHTING_MACHINE': 0x01, 'CC_CLINICAL_THERMOMETER': 0x02,
        'CC_BLOOD_PRESSURE_METER': 0x03, 'CC_BLOOD_SUGAR_METER': 0x04,
        'CC_BODY_FAT_METER': 0x05,
    },
    "CGC_MANAGEMENT_RELATED": {
        # Class Group Code = 0x05 -- management/operation-related device class group
        'CC_SECURE_COMM_SHARED_KEY_SETUP_NODE': 0xFC, 'CC_SWITCH': 0xFD,
        'CC_PORTABLE_TERMINAL': 0xFE, 'CC_CONTROLLER': 0xFF,
    },
    "CGC_AV_RELATED": {
        # Class Group Code = 0x06 -- audiovisual-related device class group
        'CC_DISPLAY': 0x01, 'CC_TELEVISION': 0x02,
        'CC_AUDIO': 0x03, 'CC_NETWORK_CAMERA': 0x04,
    },
    "CGC_PROFILE_CLASS": {
        # Class Group Code = 0x0E
        'CC_NODE_PROFILE': 0xF0,
    },
    "CGC_USER_DEFINITION_CLASS": {
        # Echonet Lite Instance Code (EOJ)
        # Note: 1. only for Class Code = Profile Class (0x0E)
        #       2. only for Class Group Code = Node Profile Class (0xF0)
        'IC_GENERAL_NODE': 0x01, 'IC_TRANSMISSION_ONLY_NODE': 0x02,
    },
}

# Echonet Lite Class Code (EOJ) -- flat constants.
# Class Group Code = 0x00 -- sensor-related device class group
CC_GAS_LEAK_SENSOR = 0x01
CC_CRIME_PREVENTION_SENSOR = 0x02
CC_EMERGENCY_BUTTON = 0x03
CC_FIRST_AID_SENSOR = 0x04
CC_EARTHQUAKE_SENSOR = 0x05
CC_ELECTRIC_LEAK_SENSOR = 0x06
CC_HUMAN_DETECTION_SENSOR = 0x07
CC_VISITOR_SENSOR = 0x08
CC_CALL_SENSOR = 0x09
CC_CONDENSATION_SENSOR = 0x0A
CC_AIR_POLLUTION_SENSOR = 0x0B
CC_OXYGEN_SENSOR = 0x0C
CC_ILLUMINANCE_SENSOR = 0x0D
CC_SOUND_SENSOR = 0x0E
CC_MAILING_SENSOR = 0x0F
CC_WEIGHT_SENSOR = 0x10
CC_TEMPERTURE_SENSOR = 0x11
CC_HUMIDITY_SENSOR = 0x12
CC_RAIN_SENSOR = 0x13
CC_WATER_LEVEL_SENSOR = 0x14
CC_BATH_WATER_LEVEL_SENSOR = 0x15
CC_BATH_HEATING_STATUS_SENSOR = 0x16
CC_WATER_LEAK_SENSOR = 0x17
CC_WATER_OVERFLOW_SENSOR = 0x18
CC_FIRE_SENSOR = 0x19
CC_CIGARETTE_SMOKE_SENSOR = 0x1A
CC_CO2_SENSOR = 0x1B
CC_GAS_SENSOR = 0x1C
CC_VOC_SENSOR = 0x1D
CC_DIFFERENTIAL_PRESSURE_SENSOR = 0x1E
CC_AIR_SPEED_SENSOR = 0x1F
CC_ODOR_SENSOR = 0x20
CC_FLAME_SENSOR = 0x21
CC_ELECTRIC_ENERGY_SENSOR = 0x22
CC_CURRENT_VALUE_SENSOR = 0x23
CC_DAYLIGHT_SENSOR = 0x24
CC_WATER_FLOW_RATE_SENSOR = 0x25
CC_MICROMOTION_SENSOR = 0x26
CC_PASSAGE_SENSOR = 0x27
CC_BED_PRESENCE_SENSOR = 0x28
CC_OPEN_CLOSE_SENSOR = 0x29
CC_ACTIVITY_AMOUNT_SENSOR = 0x2A
CC_HUMAN_BODY_LOCATION_SENSOR = 0x2B
CC_SNOW_SENSOR = 0x2C
CC_AIR_PRESSURE_SENSOR = 0x2D

# Class Group Code = 0x01 -- air-conditioner device class group
CC_HOME_AIR_CONDITIONER = 0x30
CC_COLD_BLASTER = 0x31
CC_ELECTRIC_FAN = 0x32
CC_VENTILATION_FAN = 0x33
CC_AIR_CONDITIONER_VENTILATION_FAN = 0x34
CC_AIR_CLEANER = 0x35
CC_COLD_BLAST_FAN = 0x36
CC_CIRCULATOR = 0x37
CC_DEHUMIDIFIER = 0x38
CC_HUMIDIFIER = 0x39
CC_CEILING_FAN = 0x3A
CC_ELECTRIC_KOTATSU = 0x3B
CC_ELECTRIC_HEATING_PAD = 0x3C
CC_ELECTRIC_BLANKET = 0x3D
CC_SPACE_HEATER = 0x3E
CC_PANEL_HEATER = 0x3F
CC_ELECTRIC_CARPET = 0x40
CC_FLOOR_HEATER_0x01 = 0x41
CC_ELECTRIC_HEATER = 0x42
CC_FAN_HEATER = 0x43
CC_BATTERY_CHARGER = 0x44
CC_PACKAGE_TYPE_COMMERCIAL_AIRCOND_INDOOR = 0x45
CC_PACKAGE_TYPE_COMMERCIAL_AIRCOND_OUTDOOR = 0x46
CC_PACKAGE_TYPE_COMMERCIAL_AIRCOND_THERMAL = 0x47
CC_COMMERCIAL_FAN_COIL_UNIT = 0x48
CC_COMMERCIAL_AIRCOND_COLD_SOURCE_CHILLER = 0x49
CC_COMMERCIAL_AIRCOND_HOT_SOURCE_BOILER = 0x50
CC_AIRCOND_VAV_FOR_COMMERCIAL_APPLICATIONS = 0x51
CC_AIRCOND_FOR_COMMERCIAL_APPLICATIONS = 0x52
CC_UNIT_COOLER = 0x53
CC_CONDENSING_UNIT_FOR_COMMERCIAL_APP = 0x54
CC_ELECTRIC_STORAGE_HEATER = 0x55

# Class Group Code = 0x02 -- housing/facility device class group
CC_ELECTRICALLY_OPERATED_BLIND = 0x60
CC_ELECTRICALLY_OPERATED_SHUTTER = 0x61
CC_ELECTRICALLY_OPERATED_CURTAIN = 0x62
CC_ELECTRICALLY_OPERATED_RAIN_SLIDING_DOOR = 0x63
CC_ELECTRICALLY_OPERATED_GATE = 0x64
CC_ELECTRICALLY_OPERATED_WINDOW = 0x65
CC_AUTOMATICALLY_OPERATED_ENTRANCE_DOOR = 0x66
CC_GARDEN_SPRINKLER = 0x67
CC_FIRE_SPRINKLER = 0x68
CC_FOUNTAIN = 0x69
CC_INSTANTANEOUS_WATER_HEATER = 0x6A
CC_ELECTRIC_WATER_HEATER = 0x6B
CC_SOLAR_WATER_HEATER = 0x6C
CC_CIRCULATION_PUMP = 0x6D
CC_BIDET_EQUIPPED_TOILET = 0x6E
CC_ELECTRIC_LOCK = 0x6F
CC_GAS_LINE_VALVE = 0x70
CC_HOME_SAUNA = 0x71
CC_HOT_WATER_GENERATOR = 0x72
CC_BATHROOM_DRYER = 0x73
CC_HOME_ELEVATOR = 0x74
CC_ELECTRICALLY_OPERATED_ROOM_DIVIDER = 0x75
CC_HORIZONTAL_TRANSFER = 0x76
CC_ELECTRICALLY_OPERATED_CLOTH_DRYING_POLE = 0x77
CC_SEPTIC_TANK = 0x78
CC_HOME_SOLAR_POWER_GENERATION = 0x79
CC_COLD_HOT_WATER_HEAT_SOURCE_EQUIPMENT = 0x7A
CC_FLOOR_HEATER_0x02 = 0x7B
CC_FUEL_CELL = 0x7C
CC_STORAGE_BATTERY = 0x7D
CC_ELECTRIC_VEHICLE_CHARGER_DISCHARGER = 0x7E
CC_ENGINE_COGENERATION = 0x7F
CC_ELECTRIC_ENERGY_METER = 0x80
CC_WATER_FLOW_METER = 0x81
CC_GAS_METER = 0x82
CC_LP_GAS_METER = 0x83
CC_CLOCK = 0x84
CC_AUTOMATIC_DOOR = 0x85
CC_COMMERCIAL_ELEVATOR = 0x86
CC_DISTRIBUTION_PANEL_METERING = 0x87
CC_LOW_VOLTAGE_SMART_ELECTRIC_ENERGY_METER = 0x88
CC_SMART_GAS_METER = 0x89
CC_HIGH_VOLTAGE_SMART_ELECTRIC_ENERGY_METER = 0x8A
CC_GENERAL_LIGHTING_CLASS = 0x90
CC_SINGLE_FUNCTION_LIGHTING = 0x91
CC_EMERGENCY_LIGHTING = 0x99
CC_EQUIPMENT_LIGHT = 0x9D
CC_BUZZER = 0xA0

# Class Group Code = 0x03 -- cooking/household-related device class group
CC_COFFEE_MACHINE = 0xB0
CC_COFFEE_MILL = 0xB1
CC_ELECTRIC_HOT_WATER_POT = 0xB2
CC_ELECTRIC_STOVE = 0xB3
CC_TOASTER = 0xB4
CC_JUICER_FOOD_MIXER = 0xB5
CC_FOOD_PROCESSOR = 0xB6
CC_REFRIGERATOR = 0xB7
CC_COMBINATION_MICROWAVE_OVEN = 0xB8
CC_COOKING_HEATER = 0xB9
CC_OVEN = 0xBA
CC_RICE_COOKER = 0xBB
CC_ELECTRONIC_JAR = 0xBC
CC_DISH_WASHER = 0xBD
CC_DISH_DRYER = 0xBE
CC_ELECTRIC_RICE_CARD_COOKER = 0xBF
CC_KEEP_WARM_MACHINE = 0xC0
CC_RICE_MILL = 0xC1
CC_AUTOMATIC_BREAD_COOKER = 0xC2
CC_SLOW_COOKER = 0xC3
CC_ELECTRIC_PICKLES_COOKER = 0xC4
CC_WASHING_MACHINE = 0xC5
CC_CLOTHES_DRYER = 0xC6
CC_ELECTRIC_IRON = 0xC7
CC_TROUSER_PRESS = 0xC8
CC_FUTON_DRYER = 0xC9
CC_SMALL_ARTICLE_SHOES_DRYER = 0xCA
CC_ELECTRIC_VACUUM_CLEANER = 0xCB
CC_DISPOSER = 0xCC
CC_ELECTRIC_MOSQUITO_CATCHER = 0xCD
CC_COMMERCIAL_SHOW_CASE = 0xCE
CC_COMMERCIAL_REFRIGERATOR = 0xCF
CC_COMMERCIAL_HOT_CASE = 0xD0
CC_COMMERCIAL_FRYER = 0xD1
CC_COMMERCIAL_MICROWAVE_OVEN = 0xD2
CC_WASHER_AND_DRYER = 0xD3
CC_COMMERCIAL_SHOW_CASE_OUTDOOR_UNIT = 0xD4

# Class Group Code = 0x04 -- health-related device class group
CC_WEIGHTING_MACHINE = 0x01
CC_CLINICAL_THERMOMETER = 0x02
CC_BLOOD_PRESSURE_METER = 0x03
CC_BLOOD_SUGAR_METER = 0x04
CC_BODY_FAT_METER = 0x05

# Class Group Code = 0x05 -- management/operation-related device class group
CC_SECURE_COMM_SHARED_KEY_SETUP_NODE = 0xFC
CC_SWITCH = 0xFD
CC_PORTABLE_TERMINAL = 0xFE
CC_CONTROLLER = 0xFF

# Class Group Code = 0x06 -- audiovisual-related device class group
CC_DISPLAY = 0x01
CC_TELEVISION = 0x02
CC_AUDIO = 0x03
CC_NETWORK_CAMERA = 0x04

# Class Group Code = 0x0E -- profile class group
CC_NODE_PROFILE = 0xF0

# Echonet Lite Instance Code (EOJ)
# Note: 1. only for Class Code = Profile Class (0x0E)
#       2. only for Class Group Code = Node Profile Class (0xF0)
IC_GENERAL_NODE = 0x01
IC_TRANSMISSION_ONLY_NODE = 0x02

# Echonet Lite Service (ESV): service name -> code.
SERVICE_CODES = {
    'ESV_SetI': 0x60,
    'ESV_SetC': 0x61,
    'ESV_Get': 0x62,
    'ESV_INF_REQ': 0x63,
    'ESV_SetGet': 0x6E,
    'ESV_Set_Res': 0x71,
    'ESV_Get_Res': 0x72,
    'ESV_INF': 0x73,
    'ESV_INFC': 0x74,
    'ESV_INFC_Res': 0x7A,
    'ESV_SetGet_Res': 0x7E,
    'ESV_SetI_SNA': 0x50,
    'ESV_SetC_SNA': 0x51,
    'ESV_Get_SNA': 0x52,
    'ESV_INF_SNA': 0x53,
    'ESV_SetGet_SNA': 0x5E,
}

# Echonet Lite Service (ESV) -- service codes for request
ESV_SetI = 0x60
ESV_SetC = 0x61
ESV_Get = 0x62
ESV_INF_REQ = 0x63
ESV_SetGet = 0x6E

# Echonet Lite Service (ESV) -- service codes for response/notification
ESV_Set_Res = 0x71
ESV_Get_Res = 0x72
ESV_INF = 0x73
ESV_INFC = 0x74
ESV_INFC_Res = 0x7A
ESV_SetGet_Res = 0x7E

# Echonet Lite Service (ESV) -- service codes for "response not possible"
ESV_SetI_SNA = 0x50
ESV_SetC_SNA = 0x51
ESV_Get_SNA = 0x52
ESV_INF_SNA = 0x53
ESV_SetGet_SNA = 0x5E

# Echonet Lite Processing Target Property Counters (OPC):
# class name -> {EPC property name -> EPC code as a hex string}.
# NOTE(review): the source fragment was truncated right after the
# CC_AIR_POLLUTION_SENSOR entry; only the entries visible in the fragment are
# reproduced here -- confirm the remainder against the original file.
PROPERTY_CODES = {
    "CC_GAS_LEAK_SENSOR": {
        "EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
        "EPC_GAS_LEAK_OCCURRENCE_STATUS": "0xB1",
        "EPC_GAS_LEAK_OCCURRENCE_STATUS_RESET": "0xBF",
    },
    "CC_CRIME_PREVENTION_SENSOR": {
        "EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
        "EPC_INVASION_OCCURRENCE_STATUS": "0xB1",
        "EPC_INVASION_OCCURRENCE_STATUS_RESET": "0xBF",
    },
    "CC_EMERGENCY_BUTTON": {
        "EPC_EMERGENCY_OCCURRENCE_STATUS": "0xB1",
        "EPC_EMERGENCY_OCCURRENCE_STATUS_RESET": "0xBF",
    },
    "CC_FIRST_AID_SENSOR": {
        "EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
        "EPC_FIRST_AID_OCCURRENCE_STATUS": "0xB1",
        "EPC_FIRST_AID_OCCURRENCE_STATUS_RESET": "0xBF",
    },
    "CC_EARTHQUAKE_SENSOR": {
        "EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
        "EPC_EARTHQUAKE_OCCURRENCE_STATUS": "0xB1",
        "EPC_EARTHQUAKE_OCCURRENCE_STATUS_RESET": "0xBF",
    },
    "CC_ELECTRIC_LEAK_SENSOR": {
        "EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
        "EPC_ELECTRIC_LEAK_OCCURRENCE_STATUS": "0xB1",
        "EPC_ELECTRIC_LEAK_OCCURRENCE_STATUS_RESET": "0xBF",
    },
    "CC_HUMAN_DETECTION_SENSOR": {
        "EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
        "EPC_HUMAN_DETECTION_STATUS": "0xB1",
    },
    "CC_VISITOR_SENSOR": {
        "EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
        "EPC_VISITOR_DETECTION_STATUS": "0xB1",
        "EPC_VISITOR_DETECTION_HOLDING_TIME": "0xBE",
    },
    "CC_CALL_SENSOR": {
        "EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
        "EPC_CALL_STATUS": "0xB1",
        "EPC_CALL_HOLDING_TIME": "0xBE",
    },
    "CC_CONDENSATION_SENSOR": {
        "EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
        "EPC_CONDENSATION_DETECTION_STATUS": "0xB1",
    },
    "CC_AIR_POLLUTION_SENSOR": {
        "EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
        "EPC_AIR_POLLUTION_DETECTION_STATUS": "0xB1",
    },
}
element_only_enabled_disabled = False explicit_enable_disable_value_setting = True if _checkValueItemParent( policy_element=admx_policy, policy_name=this_policy_name, policy_key=this_key, policy_valueName=this_value_name, xpath_object=DISABLED_VALUE_XPATH, policy_file_data=policy_file_data, ): log.trace( "%s is disabled by detected DISABLED_VALUE_XPATH", this_policy_name ) this_policy_setting = "Disabled" policy_vals.setdefault(this_policy_namespace, {})[ this_policy_name ] = this_policy_setting if ENABLED_LIST_XPATH(admx_policy): if DISABLED_LIST_XPATH(admx_policy) or DISABLED_VALUE_XPATH(admx_policy): element_only_enabled_disabled = False explicit_enable_disable_value_setting = True if _checkListItem( policy_element=admx_policy, policy_name=this_policy_name, policy_key=this_key, xpath_object=ENABLED_LIST_XPATH, policy_file_data=policy_file_data, ): log.trace( "%s is enabled by detected ENABLED_LIST_XPATH", this_policy_name ) this_policy_setting = "Enabled" policy_vals.setdefault(this_policy_namespace, {})[ this_policy_name ] = this_policy_setting if DISABLED_LIST_XPATH(admx_policy): if ENABLED_LIST_XPATH(admx_policy) or ENABLED_VALUE_XPATH(admx_policy): element_only_enabled_disabled = False explicit_enable_disable_value_setting = True if _checkListItem( policy_element=admx_policy, policy_name=this_policy_name, policy_key=this_key, xpath_object=DISABLED_LIST_XPATH, policy_file_data=policy_file_data, ): log.trace( "%s is disabled by detected DISABLED_LIST_XPATH", this_policy_name ) this_policy_setting = "Disabled" policy_vals.setdefault(this_policy_namespace, {})[ this_policy_name ] = this_policy_setting if not explicit_enable_disable_value_setting and this_value_name: # the policy has a key/valuename but no explicit Enabled/Disabled # Value or List # these seem to default to a REG_DWORD 1 = "Enabled" **del. 
= "Disabled" if _regexSearchRegPolData( re.escape( _buildKnownDataSearchString( reg_key=this_key, reg_valueName=this_value_name, reg_vtype="REG_DWORD", reg_data="1", ) ), policy_file_data, ): log.trace( "%s is enabled by no explicit enable/disable list or value", this_policy_name, ) this_policy_setting = "Enabled" policy_vals.setdefault(this_policy_namespace, {})[ this_policy_name ] = this_policy_setting elif _regexSearchRegPolData( re.escape( _buildKnownDataSearchString( reg_key=this_key, reg_valueName=this_value_name, reg_vtype="REG_DWORD", reg_data=None, check_deleted=True, ) ), policy_file_data, ): log.trace( "%s is disabled by no explicit enable/disable list or value", this_policy_name, ) this_policy_setting = "Disabled" policy_vals.setdefault(this_policy_namespace, {})[ this_policy_name ] = this_policy_setting full_names = {} hierarchy = {} if ELEMENTS_XPATH(admx_policy): if element_only_enabled_disabled or this_policy_setting == "Enabled": # TODO does this need to be modified based on the 'required' attribute? 
required_elements = {} configured_elements = {} policy_disabled_elements = 0 for elements_item in ELEMENTS_XPATH(admx_policy): for child_item in elements_item: this_element_name = _getFullPolicyName( policy_item=child_item, policy_name=child_item.attrib["id"], return_full_policy_names=return_full_policy_names, adml_language=adml_language, ) required_elements[this_element_name] = None child_key = child_item.attrib.get("key", this_key) child_value_name = child_item.attrib.get( "valueName", this_value_name ) if etree.QName(child_item).localname == "boolean": # https://msdn.microsoft.com/en-us/library/dn605978(v=vs.85).aspx if child_item: if ( TRUE_VALUE_XPATH(child_item) and this_element_name not in configured_elements ): if _checkValueItemParent( policy_element=child_item, policy_name=this_policy_name, policy_key=child_key, policy_valueName=child_value_name, xpath_object=TRUE_VALUE_XPATH, policy_file_data=policy_file_data, ): configured_elements[this_element_name] = True log.trace( "element %s is configured true", child_item.attrib["id"], ) if ( FALSE_VALUE_XPATH(child_item) and this_element_name not in configured_elements ): if _checkValueItemParent( policy_element=child_item, policy_name=this_policy_name, policy_key=child_key, policy_valueName=child_value_name, xpath_object=FALSE_VALUE_XPATH, policy_file_data=policy_file_data, ): configured_elements[this_element_name] = False policy_disabled_elements = ( policy_disabled_elements + 1 ) log.trace( "element %s is configured false", child_item.attrib["id"], ) # WARNING - no standard ADMX files use true/falseList # so this hasn't actually been tested if ( TRUE_LIST_XPATH(child_item) and this_element_name not in configured_elements ): log.trace("checking trueList") if _checkListItem( policy_element=child_item, policy_name=this_policy_name, policy_key=this_key, xpath_object=TRUE_LIST_XPATH, policy_file_data=policy_file_data, ): configured_elements[this_element_name] = True log.trace( "element %s is configured true", 
child_item.attrib["id"], ) if ( FALSE_LIST_XPATH(child_item) and this_element_name not in configured_elements ): log.trace("checking falseList") if _checkListItem( policy_element=child_item, policy_name=this_policy_name, policy_key=this_key, xpath_object=FALSE_LIST_XPATH, policy_file_data=policy_file_data, ): configured_elements[this_element_name] = False policy_disabled_elements = ( policy_disabled_elements + 1 ) log.trace( "element %s is configured false", child_item.attrib["id"], ) else: if _regexSearchRegPolData( re.escape( _processValueItem( element=child_item, reg_key=child_key, reg_valuename=child_value_name, policy=admx_policy, parent_element=elements_item, check_deleted=True, ) ), policy_file_data, ): configured_elements[this_element_name] = False policy_disabled_elements = policy_disabled_elements + 1 log.trace( "element %s is configured false", child_item.attrib["id"], ) elif _regexSearchRegPolData( re.escape( _processValueItem( element=child_item, reg_key=child_key, reg_valuename=child_value_name, policy=admx_policy, parent_element=elements_item, check_deleted=False, ) ), policy_file_data, ): configured_elements[this_element_name] = True log.trace( "element %s is configured true", child_item.attrib["id"], ) elif etree.QName(child_item).localname in [ "decimal", "text", "longDecimal", "multiText", ]: # https://msdn.microsoft.com/en-us/library/dn605987(v=vs.85).aspx if _regexSearchRegPolData( re.escape( _processValueItem( element=child_item, reg_key=child_key, reg_valuename=child_value_name, policy=admx_policy, parent_element=elements_item, check_deleted=True, ) ), policy_file_data, ): configured_elements[this_element_name] = "Disabled" policy_disabled_elements = policy_disabled_elements + 1 log.trace("element %s is disabled", child_item.attrib["id"]) elif _regexSearchRegPolData( re.escape( _processValueItem( element=child_item, reg_key=child_key, reg_valuename=child_value_name, policy=admx_policy, parent_element=elements_item, check_deleted=False, ) ), 
policy_data=policy_file_data, ): configured_value = _getDataFromRegPolData( _processValueItem( element=child_item, reg_key=child_key, reg_valuename=child_value_name, policy=admx_policy, parent_element=elements_item, check_deleted=False, ), policy_data=policy_file_data, ) configured_elements[this_element_name] = configured_value log.trace( "element %s is enabled, value == %s", child_item.attrib["id"], configured_value, ) elif etree.QName(child_item).localname == "enum": if _regexSearchRegPolData( re.escape( _processValueItem( element=child_item, reg_key=child_key, reg_valuename=child_value_name, policy=admx_policy, parent_element=elements_item, check_deleted=True, ) ), policy_file_data, ): log.trace( "enum element %s is disabled", child_item.attrib["id"] ) configured_elements[this_element_name] = "Disabled" policy_disabled_elements = policy_disabled_elements + 1 else: for enum_item in child_item: if _checkValueItemParent( policy_element=enum_item, policy_name=child_item.attrib["id"], policy_key=child_key, policy_valueName=child_value_name, xpath_object=VALUE_XPATH, policy_file_data=policy_file_data, ): if VALUE_LIST_XPATH(enum_item): log.trace("enum item has a valueList") if _checkListItem( policy_element=enum_item, policy_name=this_policy_name, policy_key=child_key, xpath_object=VALUE_LIST_XPATH, policy_file_data=policy_file_data, ): log.trace( "all valueList items exist in file" ) configured_elements[ this_element_name ] = _getAdmlDisplayName( adml_xml_data=adml_policy_resources, display_name=enum_item.attrib[ "displayName" ], ) break else: configured_elements[ this_element_name ] = _getAdmlDisplayName( adml_xml_data=adml_policy_resources, display_name=enum_item.attrib[ "displayName" ], ) break elif etree.QName(child_item).localname == "list": return_value_name = False if ( "explicitValue" in child_item.attrib and child_item.attrib["explicitValue"].lower() == "true" ): log.trace("explicitValue list, we will return value names") return_value_name = True regex_str = 
[ r"(?!\*", r"\*", "D", "e", "l", "V", "a", "l", "s", r"\.", ")", ] delvals_regex = "\x00".join(regex_str) delvals_regex = salt.utils.stringutils.to_bytes(delvals_regex) if _regexSearchRegPolData( re.escape( _processValueItem( element=child_item, reg_key=child_key, reg_valuename=child_value_name, policy=admx_policy, parent_element=elements_item, check_deleted=False, ) ) + delvals_regex, policy_data=policy_file_data, ): configured_value = _getDataFromRegPolData( _processValueItem( element=child_item, reg_key=child_key, reg_valuename=child_value_name, policy=admx_policy, parent_element=elements_item, check_deleted=False, ), policy_data=policy_file_data, return_value_name=return_value_name, ) configured_elements[this_element_name] = configured_value log.trace( "element %s is enabled values: %s", child_item.attrib["id"], configured_value, ) elif _regexSearchRegPolData( re.escape( _processValueItem( element=child_item, reg_key=child_key, reg_valuename=child_value_name, policy=admx_policy, parent_element=elements_item, check_deleted=True, ) ), policy_file_data, ): configured_elements[this_element_name] = "Disabled" policy_disabled_elements = policy_disabled_elements + 1 log.trace("element %s is disabled", child_item.attrib["id"]) if element_only_enabled_disabled: if 0 < len(required_elements.keys()) == len(configured_elements.keys()): if policy_disabled_elements == len(required_elements.keys()): log.trace( "%s is disabled by all enum elements", this_policy_name ) policy_vals.setdefault(this_policy_namespace, {})[ this_policy_name ] = "Disabled" else: log.trace("%s is enabled by enum elements", this_policy_name) policy_vals.setdefault(this_policy_namespace, {})[ this_policy_name ] = configured_elements else: policy_vals.setdefault(this_policy_namespace, {})[ this_policy_name ] = this_policy_setting else: if this_policy_setting == "Enabled": policy_vals.setdefault(this_policy_namespace, {})[ this_policy_name ] = configured_elements else: 
policy_vals.setdefault(this_policy_namespace, {})[ this_policy_name ] = this_policy_setting else: policy_vals.setdefault(this_policy_namespace, {})[ this_policy_name ] = this_policy_setting if ( return_full_policy_names and this_policy_namespace in policy_vals and this_policy_name in policy_vals[this_policy_namespace] ): full_names.setdefault(this_policy_namespace, {}) full_names[this_policy_namespace][this_policy_name] = _getFullPolicyName( policy_item=admx_policy, policy_name=admx_policy.attrib["name"], return_full_policy_names=return_full_policy_names, adml_language=adml_language, ) # Make sure the we're passing the full policy name # This issue was found when setting the `Allow Telemetry` setting # All following states would show a change in this setting # When the state does its first `lgpo.get` it would return `AllowTelemetry` # On the second run, it would return `Allow Telemetry` # This makes sure we're always returning the full_name when required if this_policy_name in policy_vals[this_policy_namespace][this_policy_name]: full_name = full_names[this_policy_namespace][this_policy_name] setting = policy_vals[this_policy_namespace][this_policy_name].pop( this_policy_name ) policy_vals[this_policy_namespace][this_policy_name][full_name] = setting if ( this_policy_namespace in policy_vals and this_policy_name in policy_vals[this_policy_namespace] ): hierarchy.setdefault(this_policy_namespace, {})[ this_policy_name ] = _build_parent_list( policy_definition=admx_policy, return_full_policy_names=return_full_policy_names, adml_language=adml_language, ) if policy_vals and return_full_policy_names and not hierarchical_return: log.debug("Compiling non hierarchical return...") unpathed_dict = {} pathed_dict = {} for policy_namespace in list(policy_vals): for policy_item in list(policy_vals[policy_namespace]): full_name = full_names[policy_namespace][policy_item] if full_name in policy_vals[policy_namespace]: # add this item with the path'd full name full_path_list = 
hierarchy[policy_namespace][policy_item] full_path_list.reverse() full_path_list.append(full_names[policy_namespace][policy_item]) policy_vals["\\".join(full_path_list)] = policy_vals[ policy_namespace ].pop(policy_item) pathed_dict[full_name] = True else: policy_vals[policy_namespace][full_name] = policy_vals[ policy_namespace ].pop(policy_item) unpathed_dict.setdefault(policy_namespace, {})[ full_name ] = policy_item # go back and remove any "unpathed" policies that need a full path for path_needed in unpathed_dict[policy_namespace]: # remove the item with the same full name and re-add it w/a path'd version full_path_list = hierarchy[policy_namespace][ unpathed_dict[policy_namespace][path_needed] ] full_path_list.reverse() full_path_list.append(path_needed) log.trace("full_path_list == %s", full_path_list) policy_vals["\\".join(full_path_list)] = policy_vals[ policy_namespace ].pop(path_needed) for policy_namespace in list(policy_vals): # Remove empty entries if policy_vals[policy_namespace] == {}: policy_vals.pop(policy_namespace) # Remove namespace and keep the values elif isinstance(policy_vals[policy_namespace], dict): if this_policy_namespace == policy_namespace and not hierarchical_return: policy_vals.update(policy_vals[policy_namespace]) policy_vals.pop(policy_namespace) if policy_vals and hierarchical_return: if hierarchy: log.debug("Compiling hierarchical return...") for policy_namespace in hierarchy: for hierarchy_item in hierarchy[policy_namespace]: if hierarchy_item in policy_vals[policy_namespace]: t_dict = {} first_item = True for item in hierarchy[policy_namespace][hierarchy_item]: new_dict = {} if first_item: h_policy_name = hierarchy_item if return_full_policy_names: h_policy_name = full_names[policy_namespace][ hierarchy_item ] new_dict[item] = { h_policy_name: policy_vals[policy_namespace].pop( hierarchy_item ) } first_item = False else: new_dict[item] = t_dict t_dict = new_dict if t_dict: policy_vals = dictupdate.update(policy_vals, t_dict) if 
( policy_namespace in policy_vals and policy_vals[policy_namespace] == {} ): policy_vals.pop(policy_namespace) policy_vals = { policy_data.admx_registry_classes[policy_class]["lgpo_section"]: { "Administrative Templates": policy_vals } } return policy_vals def get_policy( policy_name, policy_class, adml_language="en-US", return_value_only=True, return_full_policy_names=True, hierarchical_return=False, ): r""" Get the current settings for a single policy on the machine Args: policy_name (str): The name of the policy to retrieve. Can be the any of the names or alieses returned by ``lgpo.get_policy_info`` policy_class (str): The policy class. Must be one of ``machine`` or ``user`` adml_language (str): The language code for the adml file to use for localization.
# pset9/finance/application.py
# CS50 Finance: a Flask web app for "trading" stocks with virtual money.
import os

from cs50 import SQL
from flask import Flask, flash, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash

from helpers import apology, login_required, lookup, usd, validate_register_form, validate_buy_form, validate_cash

# Configure application
app = Flask(__name__)

# Ensure templates are auto-reloaded
app.config['TEMPLATES_AUTO_RELOAD'] = True


# Ensure responses aren't cached
@app.after_request
def after_request(response):
    """Attach no-cache headers to every response."""
    response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
    response.headers['Expires'] = 0
    response.headers['Pragma'] = 'no-cache'
    return response


# Custom filter: format dollar amounts in templates
app.jinja_env.filters['usd'] = usd

# Configure session to use filesystem (instead of signed cookies)
app.config['SESSION_FILE_DIR'] = mkdtemp()
app.config['SESSION_PERMANENT'] = False
app.config['SESSION_TYPE'] = 'filesystem'
Session(app)

# Configure CS50 Library to use SQLite database
db = SQL('sqlite:///finance.db')

# Make sure API key is set (presumably used by helpers.lookup for quotes)
if not os.environ.get('API_KEY'):
    raise RuntimeError('API_KEY not set')


@app.route('/')
@login_required
def index():
    """Show portfolio of stocks for the logged-in user."""
    user_id = session.get('user_id')
    try:
        summary = db.execute(
            "SELECT symbol, name, price, shares "
            "FROM portfolio_entries "
            "JOIN companies ON portfolio_entries.company_id = companies.id "
            "WHERE portfolio_entries.user_id = ? AND portfolio_entries.shares != 0 "
            "GROUP BY symbol",
            user_id
        )
        # Total money currently tied up in holdings (at last stored price).
        money_spend = 0
        for company in summary:
            money_spend += company['price'] * company['shares']
        user_cash = db.execute('SELECT cash FROM users WHERE id = ? LIMIT 1', user_id)
        user_cash = user_cash[0]['cash']
    except Exception:
        return render_template('index.html', error='Something went wrong. Try again later.')
    return render_template('index.html', summary=summary, user_cash=user_cash, money_spend=money_spend)


@app.route('/buy', methods=['GET', 'POST'])
@login_required
def buy():
    """Buy shares of stock."""
    if request.method == 'GET':
        return render_template('buy.html')

    if request.method == 'POST':
        user_id = session.get('user_id')
        symbol = request.form.get('symbol')
        shares = request.form.get('shares')
        data = {'symbol': symbol, 'shares': shares}

        errors = validate_buy_form(symbol, shares)
        if errors:
            return render_template('buy.html', data=data, errors=errors)

        user_cash = get_user_cash(user_id)
        # get_user_cash returns None only on a DB failure; a balance of 0 is
        # a legitimate value, so compare against None rather than truthiness.
        if user_cash is None:
            cash_error = 'Something went wrong. Try again later.'
            return render_template('buy.html', data=data, errors={'cash': cash_error})

        company = lookup(symbol)
        update_company(company)  # no-op (returns False) when company is None
        if not company:
            quote_error = 'not found.'
            return render_template('buy.html', data=data, errors={'symbol': quote_error})

        price = company['price']
        total_price = price * int(shares)
        if user_cash < total_price:
            shares_error = 'Not enough money to buy that many shares.'
            return render_template('buy.html', data=data, errors={'message': shares_error})

        if not buy_shares(user_id, company['symbol'], price, shares):
            cash_error = 'Something went wrong. Try again later.'
            return render_template('buy.html', data=data, errors={'message': cash_error})

        return redirect('/')


@app.route('/history')
@login_required
def history():
    """Show history of transactions."""
    user_id = session.get('user_id')
    try:
        transactions = db.execute(
            "SELECT companies.symbol, transactions.shares, transactions.price, "
            "transactions.type, transactions.created_at "
            "FROM transactions "
            "JOIN companies ON transactions.company_id = companies.id "
            "WHERE transactions.user_id = ? ORDER BY created_at DESC",
            user_id
        )
    except Exception:
        return render_template('history.html', error='Something went wrong. Try again later.')
    return render_template('history.html', transactions=transactions)


@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log user in."""

    # Forget any user_id
    session.clear()

    # User reached route via POST (as by submitting a form via POST)
    if request.method == 'POST':

        # Ensure username was submitted
        if not request.form.get('username'):
            return apology('must provide username', 403)

        # Ensure password was submitted
        elif not request.form.get('password'):
            return apology('must provide password', 403)

        # Query database for username
        rows = db.execute('SELECT * FROM users WHERE username = ?', request.form.get('username'))

        # Ensure username exists and password is correct
        if len(rows) != 1 or not check_password_hash(rows[0]['hash'], request.form.get('password')):
            return apology('invalid username and/or password', 403)

        # Remember which user has logged in
        session['user_id'] = rows[0]['id']

        # Redirect user to home page
        return redirect('/')

    # User reached route via GET (as by clicking a link or via redirect)
    else:
        return render_template('login.html')


@app.route('/logout')
def logout():
    """Log user out."""

    # Forget any user_id
    session.clear()

    # Redirect user to login form
    return redirect('/')


@app.route('/quote', methods=['GET', 'POST'])
@login_required
def quote():
    """Get stock quote."""
    if request.method == 'GET':
        return render_template('quote.html')

    if request.method == 'POST':
        symbol = request.form.get('symbol')
        company = lookup(symbol)
        update_company(company)  # cache the latest quote locally; safe on None
        if not company:
            message = 'not found.'
            return render_template('quote.html', message=message)
        return render_template('quoted.html', company=company)


@app.route('/register', methods=['GET', 'POST'])
def register():
    """Register user."""
    if request.method == 'GET':
        return render_template('register.html')

    if request.method == 'POST':
        username = request.form.get('username')
        password = request.form.get('password')
        confirmation = request.form.get('confirmation')
        user = {'username': username}

        errors = validate_register_form(username, password, confirmation)
        if errors:
            return render_template('register.html', errors=errors, user=user)

        try:
            current_user = db.execute('SELECT id FROM users WHERE username = ?', username)
        except Exception:
            check_error = 'Something went wrong. Try again later.'
            return render_template('register.html', user=user, errors={'register': check_error})

        if current_user:
            exist = 'already exist.'
            return render_template('register.html', user=user, errors={'username': exist})

        user_id = register_user(username, password)
        if not user_id:
            register_error = 'Something went wrong. Try again later.'
            return render_template('register.html', user=user, errors={'register': register_error})
        else:
            # Log the new user in immediately.
            session['user_id'] = user_id
            return redirect('/')


@app.route('/sell', methods=['GET', 'POST'])
@login_required
def sell():
    """Sell shares of stock."""
    user_id = session.get('user_id')
    try:
        symbols_list = db.execute(
            "SELECT symbol FROM companies WHERE id IN "
            "(SELECT company_id FROM portfolio_entries WHERE user_id = ?)",
            user_id
        )
        # Materialize as a list (not a one-shot map iterator) so the template
        # can iterate it on any of the render paths below.
        symbols_list = [o['symbol'] for o in symbols_list]
    except Exception:
        sell_error = 'Something went wrong. Try again later.'
        # BUG FIX: the original used `{ message: sell_error }`, referencing an
        # undefined name `message` and raising NameError on this path.
        return render_template('sell.html', errors={'message': sell_error})

    if request.method == 'GET':
        return render_template('sell.html', symbols_list=symbols_list)

    if request.method == 'POST':
        symbol = request.form.get('symbol')
        shares = request.form.get('shares')

        errors = validate_sell_form(user_id, symbol, shares)
        if errors:
            return render_template('sell.html', symbols_list=symbols_list, errors=errors)

        company = lookup(symbol)
        update_company(company)

        if not sell_shares(user_id, symbol, shares):
            sell_error = 'Something went wrong. Try again later.'
            return render_template('sell.html', symbols_list=symbols_list,
                                   errors={'message': sell_error})

        return redirect('/')


@app.route('/add-cash', methods=['GET', 'POST'])
@login_required
def cash():
    """Add cash to the user's balance."""
    if request.method == 'GET':
        return render_template('cash.html')

    if request.method == 'POST':
        user_id = session.get('user_id')
        cash = request.form.get('cash')

        errors = validate_cash(cash)
        if errors:
            return render_template('cash.html', errors=errors)

        if not add_cash(user_id, cash):
            cash_error = 'Something went wrong. Try again later.'
            return render_template('cash.html', errors={'cash': cash_error})

        return redirect('/')


def errorhandler(e):
    """Render an apology page for any HTTP error."""
    if not isinstance(e, HTTPException):
        e = InternalServerError()
    return apology(e.name, e.code)


# Listen for errors
for code in default_exceptions:
    app.errorhandler(code)(errorhandler)


def register_user(username, password):
    """Insert a new user; return the new row id, or False on DB failure."""
    password_hash = generate_password_hash(password)
    try:
        user_id = db.execute(
            'INSERT INTO users (username, hash) VALUES (?, ?)',
            username, password_hash
        )
    except Exception:
        return False
    else:
        return user_id


def get_user_cash(user_id):
    """Return the user's cash balance, or None on DB failure."""
    try:
        user_cash = db.execute('SELECT cash FROM users WHERE id = ?', user_id)
    except Exception:
        return None
    else:
        return user_cash[0]['cash']


def buy_shares(user_id, symbol, price, shares):
    """Record a purchase: transaction row, portfolio upsert, cash debit.

    Returns True on success, False on any DB failure.
    """
    total_price = float(price) * int(shares)
    try:
        company_id = db.execute("SELECT id FROM companies WHERE symbol = ?", symbol)
        company_id = company_id[0]['id']
        db.execute(
            "INSERT INTO transactions (user_id, company_id, shares, price, type, created_at) "
            "VALUES (?, ?, ?, ?, 'buy', datetime('now'))",
            user_id, company_id, shares, price
        )
        db.execute(
            "INSERT INTO portfolio_entries (user_id, company_id, shares) "
            "VALUES (?, ?, ?) "
            "ON CONFLICT (company_id) "
            "DO UPDATE SET shares = shares + ?",
            user_id, company_id, shares, shares
        )
        db.execute("UPDATE users SET cash = cash - ? WHERE id = ?", total_price, user_id)
    except Exception:
        return False
    else:
        return True


def sell_shares(user_id, symbol, shares):
    """Record a sale: transaction row, portfolio decrement, cash credit.

    Returns True on success, False on any DB failure. Note the sale uses the
    last price stored in `companies`, not a fresh quote.
    """
    try:
        company_id = db.execute("SELECT id FROM companies WHERE symbol = ?", symbol)
        company_id = company_id[0]['id']
        price = db.execute("SELECT price FROM companies WHERE id = ?", company_id)
        price = price[0]['price']
        total_price = price * int(shares)
        db.execute(
            "INSERT INTO transactions (user_id, company_id, shares, price, type, created_at) "
            "VALUES (?, ?, ?, ?, 'sell', datetime('now'))",
            user_id, company_id, shares, price
        )
        db.execute(
            "INSERT INTO portfolio_entries (user_id, company_id, shares) "
            "VALUES (?, ?, ?) "
            "ON CONFLICT (company_id) "
            "DO UPDATE SET shares = shares - ?",
            user_id, company_id, shares, shares
        )
        db.execute("UPDATE users SET cash = cash + ? WHERE id = ?", total_price, user_id)
    except Exception:
        return False
    else:
        return True


def update_company(company):
    """Cache a quote result in `companies`; returns False when company is falsy
    or the insert fails (e.g. the symbol already exists)."""
    if not company:
        return False
    try:
        db.execute(
            "INSERT INTO companies (symbol, name, price) VALUES (?, ?, ?)",
            company['symbol'], company['name'], company['price']
        )
    except Exception:
        return False
    else:
        return True


def add_cash(user_id, cash):
    """Credit `cash` to the user's balance; return True on success.

    BUG FIX: the original converted `cash` to `cash_int` and then passed the
    raw string to the UPDATE, leaving the converted value unused. The integer
    value is now the one written.
    """
    try:
        db.execute('UPDATE users SET cash = cash + ? WHERE id = ?', int(cash), user_id)
    except Exception:
        return False
    else:
        return True


def validate_sell_form(user_id, symbol, shares):
    """Validate a sell request; return a dict of field -> error message
    (empty dict means the form is valid)."""
    blank = 'can\'t be blank.'
    positive = 'must be positive.'
    not_number = 'must be a number'
    errors = {}

    try:
        shares_int = int(shares)
    except (TypeError, ValueError):
        errors['shares'] = not_number
    else:
        if shares_int <= 0:
            errors['shares'] = positive

    if not shares:
        errors['shares'] = blank
    if not symbol:
        errors['symbol'] = blank
        return errors

    try:
        symbols_list = db.execute(
            "SELECT symbol FROM companies WHERE id IN "
            "(SELECT company_id FROM portfolio_entries WHERE user_id = ?)",
            user_id
        )
        symbols_list = map(lambda o: o['symbol'], symbols_list)
        if symbol not in symbols_list:
            errors['symbol'] = 'not in portfolio.'
            return errors
    except Exception:
        errors['message'] = 'Something went wrong. Try again later.'
        return errors

    try:
        user_shares = db.execute(
            "SELECT shares FROM portfolio_entries WHERE user_id = ? "
            "AND company_id IN (SELECT id FROM companies WHERE symbol = ?)",
            user_id, symbol
        )
        # NOTE(review): the original source was truncated here; presumably it
        # compared `user_shares[0]['shares']` against the requested amount.
        # TODO: confirm against the full original file.
        if user_shares and int(shares) > user_shares[0]['shares']:
            errors['shares'] = 'not enough shares in portfolio.'
    except Exception:
        errors['message'] = 'Something went wrong. Try again later.'
    return errors
<reponame>seifer08ms/docker-xx-net # -*- coding: utf-8 -*- """ hyper/http20/connection ~~~~~~~~~~~~~~~~~~~~~~~ Objects that build hyper's connection-level HTTP/2 abstraction. """ from ..tls import wrap_socket, H2_NPN_PROTOCOLS, H2C_PROTOCOL from ..common.exceptions import ConnectionResetError from ..common.bufsocket import BufferedSocket from ..common.headers import HTTPHeaderMap from ..common.util import to_host_port_tuple, to_native_string, to_bytestring from ..packages.hyperframe.frame import ( FRAMES, DataFrame, HeadersFrame, PushPromiseFrame, RstStreamFrame, SettingsFrame, Frame, WindowUpdateFrame, GoAwayFrame, PingFrame, BlockedFrame, FRAME_MAX_LEN, FRAME_MAX_ALLOWED_LEN ) from ..packages.hpack.hpack_compat import Encoder, Decoder from .stream import Stream from .response import HTTP20Response, HTTP20Push from .window import FlowControlManager from .exceptions import ConnectionError, ProtocolError from . import errors import errno import logging import socket log = logging.getLogger(__name__) DEFAULT_WINDOW_SIZE = 65535 class HTTP20Connection(object): """ An object representing a single HTTP/2 connection to a server. This object behaves similarly to the Python standard library's ``HTTPConnection`` object, with a few critical differences. Most of the standard library's arguments to the constructor are irrelevant for HTTP/2 or not supported by hyper. :param host: The host to connect to. This may be an IP address or a hostname, and optionally may include a port: for example, ``'http2bin.org'``, ``'http2bin.org:443'`` or ``'127.0.0.1'``. :param port: (optional) The port to connect to. If not provided and one also isn't provided in the ``host`` parameter, defaults to 443. :param secure: (optional) Whether the request should use TLS. Defaults to ``False`` for most requests, but to ``True`` for any request issued to port 443. :param window_manager: (optional) The class to use to manage flow control windows. 
This needs to be a subclass of the :class:`BaseFlowControlManager <hyper.http20.window.BaseFlowControlManager>`. If not provided, :class:`FlowControlManager <hyper.http20.window.FlowControlManager>` will be used. :param enable_push: (optional) Whether the server is allowed to push resources to the client (see :meth:`get_pushes() <hyper.HTTP20Connection.get_pushes>`). :param ssl_context: (optional) A class with custom certificate settings. If not provided then hyper's default ``SSLContext`` is used instead. :param proxy_host: (optional) The proxy to connect to. This can be an IP address or a host name and may include a port. :param proxy_port: (optional) The proxy port to connect to. If not provided and one also isn't provided in the ``proxy`` parameter, defaults to 8080. """ def __init__(self, ssl_sock, host=None, ip=None, port=None, secure=None, window_manager=None, enable_push=False, ssl_context=None, proxy_host=None, proxy_port=None, **kwargs): """ Creates an HTTP/2 connection to a specific server. """ self.ip = ip if port is None: self.host, self.port = to_host_port_tuple(host, default_port=443) else: self.host, self.port = host, port if secure is not None: self.secure = secure elif self.port == 443: self.secure = True else: self.secure = False self._enable_push = enable_push self.ssl_context = ssl_context # Setup proxy details if applicable. if proxy_host: if proxy_port is None: self.proxy_host, self.proxy_port = to_host_port_tuple(proxy_host, default_port=8080) else: self.proxy_host, self.proxy_port = proxy_host, proxy_port else: self.proxy_host = None self.proxy_port = None #: The size of the in-memory buffer used to store data from the #: network. This is used as a performance optimisation. Increase buffer #: size to improve performance: decrease it to conserve memory. #: Defaults to 64kB. self.network_buffer_size = 65536 # Create the mutable state. 
self.__wm_class = window_manager or FlowControlManager self.__init_state() if ssl_sock: self._sock = BufferedSocket(ssl_sock, self.network_buffer_size) self._send_preamble() return def __init_state(self): """ Initializes the 'mutable state' portions of the HTTP/2 connection object. This method exists to enable HTTP20Connection objects to be reused if they're closed, by resetting the connection object to its basic state whenever it ends up closed. Any situation that needs to recreate the connection can call this method and it will be done. This is one of the only methods in hyper that is truly private, as users should be strongly discouraged from messing about with connection objects themselves. """ # Streams are stored in a dictionary keyed off their stream IDs. We # also save the most recent one for easy access without having to walk # the dictionary. # Finally, we add a set of all streams that we or the remote party # forcefully closed with RST_STREAM, to avoid encountering issues where # frames were already in flight before the RST was processed. self.streams = {} self.recent_stream = None self.next_stream_id = 1 self.reset_streams = set() # Header encoding/decoding is at the connection scope, so we embed a # header encoder and a decoder. These get passed to child stream # objects. self.encoder = Encoder() self.decoder = Decoder() # Values for the settings used on an HTTP/2 connection. self._settings = { SettingsFrame.INITIAL_WINDOW_SIZE: DEFAULT_WINDOW_SIZE, SettingsFrame.SETTINGS_MAX_FRAME_SIZE: FRAME_MAX_LEN, } # The socket used to send data. self._sock = None # The inbound and outbound flow control windows. self._out_flow_control_window = 65535 # Instantiate a window manager. self.window_manager = self.__wm_class(65535) return def request(self, method, url, body=None, headers={}): """ This will send a request to the server using the HTTP request method ``method`` and the selector ``url``. 
If the ``body`` argument is present, it should be string or bytes object of data to send after the headers are finished. Strings are encoded as UTF-8. To use other encodings, pass a bytes object. The Content-Length header is set to the length of the body field. :param method: The request method, e.g. ``'GET'``. :param url: The URL to contact, e.g. ``'/path/segment'``. :param body: (optional) The request body to send. Must be a bytestring or a file-like object. :param headers: (optional) The headers to send on the request. :returns: A stream ID for the request. """ stream_id = self.putrequest(method, url) default_headers = (':method', ':scheme', ':authority', ':path') for name, value in headers.items(): is_default = to_native_string(name) in default_headers self.putheader(name, value, stream_id, replace=is_default) # Convert the body to bytes if needed. if body: body = to_bytestring(body) self.endheaders(message_body=body, final=True, stream_id=stream_id) return stream_id def _get_stream(self, stream_id): return (self.streams[stream_id] if stream_id is not None else self.recent_stream) def get_response(self, stream_id=None): """ Should be called after a request is sent to get a response from the server. If sending multiple parallel requests, pass the stream ID of the request whose response you want. Returns a :class:`HTTP20Response <hyper.HTTP20Response>` instance. If you pass no ``stream_id``, you will receive the oldest :class:`HTTPResponse <hyper.HTTP20Response>` still outstanding. :param stream_id: (optional) The stream ID of the request for which to get a response. :returns: A :class:`HTTP20Response <hyper.HTTP20Response>` object. """ stream = self._get_stream(stream_id) return HTTP20Response(stream.getheaders(), stream) def get_pushes(self, stream_id=None, capture_all=False): """ Returns a generator that yields push promises from the server. **Note that this method is not idempotent**: promises returned in one call will not be returned in subsequent calls. 
Iterating through generators returned by multiple calls to this method simultaneously results in undefined behavior. :param stream_id: (optional) The stream ID of the request for which to get push promises. :param capture_all: (optional) If ``False``, the generator will yield all buffered push promises without blocking. If ``True``, the generator will first yield all buffered push promises, then yield additional ones as they arrive, and terminate when the original stream closes. :returns: A generator of :class:`HTTP20Push <hyper.HTTP20Push>` objects corresponding to the streams pushed by the server. """ stream = self._get_stream(stream_id) for promised_stream_id, headers in stream.get_pushes(capture_all): yield HTTP20Push( HTTPHeaderMap(headers), self.streams[promised_stream_id] ) def connect(self): """ Connect to the server specified when the object was created. This is a no-op if we're already connected. :returns: Nothing. """ if self._sock is None: if not self.proxy_host: host = self.host port = self.port else: host = self.proxy_host port = self.proxy_port if self.ip: sock = socket.create_connection((self.ip, port), 5) else: sock = socket.create_connection((host, port), 5) if self.secure: assert not self.proxy_host, "Using a proxy with HTTPS not yet supported." sock, proto = wrap_socket(sock, host, self.ssl_context) else: proto = H2C_PROTOCOL log.debug("Selected NPN protocol: %s", proto) assert proto in H2_NPN_PROTOCOLS or proto == H2C_PROTOCOL self._sock = BufferedSocket(sock, self.network_buffer_size) self._send_preamble() return def _send_preamble(self): """ Sends the necessary HTTP/2 preamble. """ # We need to send the connection header immediately on this # connection, followed by an initial settings frame. self._sock.send(b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n') f = SettingsFrame(0) f.settings[SettingsFrame.ENABLE_PUSH] = int(self._enable_push) self._send_cb(f) # The server will also send an initial settings frame, so get it. 
self._recv_cb() def close(self, error_code=None): """ Close the connection to the server. :param error_code: (optional) The error code to reset all streams with. :returns: Nothing. """ # Close all streams for stream in list(self.streams.values()):
# Copyright (c) 2009, 2010, 2011 Google Inc. All rights reserved. # Copyright (c) 2009 Apple Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import datetime import logging import os import re from webkitpy.common.memoized import memoized from webkitpy.common.system.executive import Executive, ScriptError from .commitmessage import CommitMessage from .scm import AuthenticationError, SCM, commit_error_handler from .svn import SVN, SVNRepository _log = logging.getLogger(__name__) class AmbiguousCommitError(Exception): def __init__(self, num_local_commits, has_working_directory_changes): Exception.__init__(self, "Found %s local commits and the working directory is %s" % ( num_local_commits, ["clean", "not clean"][has_working_directory_changes])) self.num_local_commits = num_local_commits self.has_working_directory_changes = has_working_directory_changes class Git(SCM, SVNRepository): # Git doesn't appear to document error codes, but seems to return # 1 or 128, mostly. ERROR_FILE_IS_MISSING = 128 executable_name = 'git' def __init__(self, cwd, **kwargs): SCM.__init__(self, cwd, **kwargs) self._check_git_architecture() def _machine_is_64bit(self): import platform # This only is tested on Mac. if not platform.mac_ver()[0]: return False # platform.architecture()[0] can be '64bit' even if the machine is 32bit: # http://mail.python.org/pipermail/pythonmac-sig/2009-September/021648.html # Use the sysctl command to find out what the processor actually supports. return self.run(['sysctl', '-n', 'hw.cpu64bit_capable']).rstrip() == '1' def _executable_is_64bit(self, path): # Again, platform.architecture() fails us. On my machine # git_bits = platform.architecture(executable=git_path, bits='default')[0] # git_bits is just 'default', meaning the call failed. 
file_output = self.run(['file', path]) return re.search('x86_64', file_output) def _check_git_architecture(self): if not self._machine_is_64bit(): return # We could path-search entirely in python or with # which.py (http://code.google.com/p/which), but this is easier: git_path = self.run(['which', self.executable_name]).rstrip() if self._executable_is_64bit(git_path): return webkit_dev_thread_url = "https://lists.webkit.org/pipermail/webkit-dev/2010-December/015287.html" _log.warning("This machine is 64-bit, but the git binary (%s) does not support 64-bit.\nInstall a 64-bit git for better performance, see:\n%s\n" % (git_path, webkit_dev_thread_url)) def _run_git(self, command_args, **kwargs): full_command_args = [self.executable_name] + command_args full_kwargs = kwargs if not 'cwd' in full_kwargs: full_kwargs['cwd'] = self.checkout_root return self.run(full_command_args, **full_kwargs) @classmethod def in_working_directory(cls, path, executive=None): try: executive = executive or Executive() return executive.run_command([cls.executable_name, 'rev-parse', '--is-inside-work-tree'], cwd=path, error_handler=Executive.ignore_error).rstrip() == "true" except OSError, e: # The Windows bots seem to through a WindowsError when git isn't installed. return False def find_checkout_root(self, path): # "git rev-parse --show-cdup" would be another way to get to the root checkout_root = self._run_git(['rev-parse', '--show-toplevel'], cwd=(path or "./")).strip() if not self._filesystem.isabs(checkout_root): # Sometimes git returns relative paths checkout_root = self._filesystem.join(path, checkout_root) return checkout_root def to_object_name(self, filepath): # FIXME: This can't be the right way to append a slash. root_end_with_slash = self._filesystem.join(self.find_checkout_root(self._filesystem.dirname(filepath)), '') # FIXME: This seems to want some sort of rel_path instead? 
return filepath.replace(root_end_with_slash, '') @classmethod def read_git_config(cls, key, cwd=None, executive=None): # FIXME: This should probably use cwd=self.checkout_root. # Pass --get-all for cases where the config has multiple values # Pass the cwd if provided so that we can handle the case of running webkit-patch outside of the working directory. # FIXME: This should use an Executive. executive = executive or Executive() return executive.run_command([cls.executable_name, "config", "--get-all", key], error_handler=Executive.ignore_error, cwd=cwd).rstrip('\n') @staticmethod def commit_success_regexp(): return "^Committed r(?P<svn_revision>\d+)$" def discard_local_commits(self): self._run_git(['reset', '--hard', self.remote_branch_ref()]) def local_commits(self): return self._run_git(['log', '--pretty=oneline', 'HEAD...' + self.remote_branch_ref()]).splitlines() def rebase_in_progress(self): return self._filesystem.exists(self.absolute_path(self._filesystem.join('.git', 'rebase-apply'))) def has_working_directory_changes(self): return self._run_git(['diff', 'HEAD', '--no-renames', '--name-only']) != "" def discard_working_directory_changes(self): # Could run git clean here too, but that wouldn't match subversion self._run_git(['reset', 'HEAD', '--hard']) # Aborting rebase even though this does not match subversion if self.rebase_in_progress(): self._run_git(['rebase', '--abort']) def status_command(self): # git status returns non-zero when there are changes, so we use git diff name --name-status HEAD instead. # No file contents printed, thus utf-8 autodecoding in self.run is fine. 
return [self.executable_name, "diff", "--name-status", "--no-renames", "HEAD"] def _status_regexp(self, expected_types): return '^(?P<status>[%s])\t(?P<filename>.+)$' % expected_types def add_list(self, paths): self._run_git(["add"] + paths) def delete_list(self, paths): return self._run_git(["rm", "-f"] + paths) def exists(self, path): return_code = self._run_git(["show", "HEAD:%s" % path], return_exit_code=True, decode_output=False) return return_code != self.ERROR_FILE_IS_MISSING def _branch_from_ref(self, ref): return ref.replace('refs/heads/', '') def _current_branch(self): return self._branch_from_ref(self._run_git(['symbolic-ref', '-q', 'HEAD']).strip()) def _upstream_branch(self): current_branch = self._current_branch() return self._branch_from_ref(self.read_git_config('branch.%s.merge' % current_branch, cwd=self.checkout_root, executive=self._executive).strip()) def merge_base(self, git_commit): if git_commit: # Rewrite UPSTREAM to the upstream branch if 'UPSTREAM' in git_commit: upstream = self._upstream_branch() if not upstream: raise ScriptError(message='No upstream/tracking branch set.') git_commit = git_commit.replace('UPSTREAM', upstream) # Special-case <refname>.. to include working copy changes, e.g., 'HEAD....' shows only the diffs from HEAD. if git_commit.endswith('....'): return git_commit[:-4] if '..' not in git_commit: git_commit = git_commit + "^.." + git_commit return git_commit return self.remote_merge_base() def changed_files(self, git_commit=None): # FIXME: --diff-filter could be used to avoid the "extract_filenames" step. status_command = [self.executable_name, 'diff', '-r', '--name-status', "--no-renames", "--no-ext-diff", "--full-index", self.merge_base(git_commit)] # FIXME: I'm not sure we're returning the same set of files that SVN.changed_files is. 
# Added (A), Copied (C), Deleted (D), Modified (M), Renamed (R) return self.run_status_and_extract_filenames(status_command, self._status_regexp("ADM")) def _changes_files_for_commit(self, git_commit): # --pretty="format:" makes git show not print the commit log header, changed_files = self._run_git(["show", "--pretty=format:", "--name-only", git_commit]).splitlines() # instead it just prints a blank line at the top, so we skip the blank line: return changed_files[1:] def changed_files_for_revision(self, revision): commit_id = self.git_commit_from_svn_revision(revision) return self._changes_files_for_commit(commit_id) def revisions_changing_file(self, path, limit=5): # raise a script error if path does not exists to match the behavior of the svn implementation. if not self._filesystem.exists(path): raise ScriptError(message="Path %s does not exist." % path) # git rev-list head --remove-empty --limit=5 -- path would be equivalent. commit_ids = self._run_git(["log", "--remove-empty", "--pretty=format:%H", "-%s" % limit, "--", path]).splitlines() return filter(lambda revision: revision, map(self.svn_revision_from_git_commit, commit_ids)) def conflicted_files(self): # We do not need to pass decode_output for this diff command # as we're passing --name-status which does not output any data. status_command = [self.executable_name, 'diff', '--name-status', '--no-renames', '--diff-filter=U'] return self.run_status_and_extract_filenames(status_command, self._status_regexp("U")) def added_files(self): return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("A")) def deleted_files(self): return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("D")) @staticmethod def supports_local_commits(): return True def display_name(self): return "git" def _most_recent_log_matching(self, grep_str, path): # We use '--grep=' + foo rather than '--grep', foo because # git 1.7.0.4 (and earlier) didn't support the separate arg. 
return self._run_git(['log', '-1', '--grep=' + grep_str, '--date=iso', self.find_checkout_root(path)]) def svn_revision(self, path): git_log = self._most_recent_log_matching('git-svn-id:', path) match = re.search("^\s*git-svn-id:.*@(?P<svn_revision>\d+)\ ", git_log, re.MULTILINE) if not match: return "" return str(match.group('svn_revision')) def timestamp_of_revision(self, path, revision): git_log = self._most_recent_log_matching('git-svn-id:.*@%s' % revision, path) match = re.search("^Date:\s*(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2}) ([+-])(\d{2})(\d{2})$", git_log, re.MULTILINE) if not match: return "" # Manually modify the timezone since Git doesn't have an option to show it in UTC. # Git also truncates milliseconds but we're going to ignore that for now. time_with_timezone = datetime.datetime(int(match.group(1)), int(match.group(2)), int(match.group(3)), int(match.group(4)), int(match.group(5)), int(match.group(6)), 0) sign = 1 if match.group(7) == '+' else -1 time_without_timezone = time_with_timezone - datetime.timedelta(hours=sign * int(match.group(8)), minutes=int(match.group(9))) return time_without_timezone.strftime('%Y-%m-%dT%H:%M:%SZ') def prepend_svn_revision(self, diff): revision = self.head_svn_revision() if not revision: return diff return "Subversion Revision: " + revision + '\n' + diff def create_patch(self, git_commit=None, changed_files=None): """Returns a byte array (str()) representing the patch file. Patch files are effectively binary since they may contain files of multiple different encodings.""" # Put code changes at the top of
# schedule dates don't make sense, and should be set to execution date for # consistency with how execution_date is set for manually triggered tasks, i.e. # triggered_date == execution_date. if dag_run and dag_run.external_trigger: prev_execution_date = self.execution_date next_execution_date = self.execution_date else: prev_execution_date = task.dag.previous_schedule(self.execution_date) next_execution_date = task.dag.following_schedule(self.execution_date) next_ds = None next_ds_nodash = None if next_execution_date: next_ds = next_execution_date.strftime('%Y-%m-%d') next_ds_nodash = next_ds.replace('-', '') prev_ds = None prev_ds_nodash = None if prev_execution_date: prev_ds = prev_execution_date.strftime('%Y-%m-%d') prev_ds_nodash = prev_ds.replace('-', '') ds_nodash = ds.replace('-', '') ts_nodash = self.execution_date.strftime('%Y%m%dT%H%M%S') ts_nodash_with_tz = ts.replace('-', '').replace(':', '') yesterday_ds_nodash = yesterday_ds.replace('-', '') tomorrow_ds_nodash = tomorrow_ds.replace('-', '') ti_key_str = "{task.dag_id}__{task.task_id}__{ds_nodash}" ti_key_str = ti_key_str.format(**locals()) if task.params: params.update(task.params) if configuration.getboolean('core', 'dag_run_conf_overrides_params'): self.overwrite_params_with_dag_run_conf(params=params, dag_run=dag_run) class VariableAccessor: """ Wrapper around Variable. This way you can get variables in templates by using {var.value.your_variable_name}. """ def __init__(self): self.var = None def __getattr__(self, item): self.var = Variable.get(item) return self.var def __repr__(self): return str(self.var) class VariableJsonAccessor: """ Wrapper around deserialized Variables. This way you can get variables in templates by using {var.json.your_variable_name}. 
""" def __init__(self): self.var = None def __getattr__(self, item): self.var = Variable.get(item, deserialize_json=True) return self.var def __repr__(self): return str(self.var) return { 'dag': task.dag, 'ds': ds, 'next_ds': next_ds, 'next_ds_nodash': next_ds_nodash, 'prev_ds': prev_ds, 'prev_ds_nodash': prev_ds_nodash, 'ds_nodash': ds_nodash, 'ts': ts, 'ts_nodash': ts_nodash, 'ts_nodash_with_tz': ts_nodash_with_tz, 'yesterday_ds': yesterday_ds, 'yesterday_ds_nodash': yesterday_ds_nodash, 'tomorrow_ds': tomorrow_ds, 'tomorrow_ds_nodash': tomorrow_ds_nodash, 'END_DATE': ds, 'end_date': ds, 'dag_run': dag_run, 'run_id': run_id, 'execution_date': self.execution_date, 'prev_execution_date': prev_execution_date, 'next_execution_date': next_execution_date, 'latest_date': ds, 'macros': macros, 'params': params, 'tables': tables, 'task': task, 'task_instance': self, 'ti': self, 'task_instance_key_str': ti_key_str, 'conf': configuration, 'test_mode': self.test_mode, 'var': { 'value': VariableAccessor(), 'json': VariableJsonAccessor() }, 'inlets': task.inlets, 'outlets': task.outlets, } def overwrite_params_with_dag_run_conf(self, params, dag_run): if dag_run and dag_run.conf: params.update(dag_run.conf) def render_templates(self): task = self.task jinja_context = self.get_template_context() if hasattr(self, 'task') and hasattr(self.task, 'dag'): if self.task.dag.user_defined_macros: jinja_context.update( self.task.dag.user_defined_macros) rt = self.task.render_template # shortcut to method for attr in task.__class__.template_fields: content = getattr(task, attr) if content: rendered_content = rt(attr, content, jinja_context) setattr(task, attr, rendered_content) def email_alert(self, exception): exception_html = str(exception).replace('\n', '<br>') jinja_context = self.get_template_context() # This function is called after changing the state # from State.RUNNING so need to subtract 1 from self.try_number. 
        jinja_context.update(dict(
            exception=exception,
            exception_html=exception_html,
            try_number=self.try_number - 1,
            max_tries=self.max_tries))

        jinja_env = self.task.get_template_env()

        default_subject = 'Airflow alert: {{ti}}'
        # For reporting purposes, we report based on 1-indexed,
        # not 0-indexed lists (i.e. Try 1 instead of
        # Try 0 for the first attempt).
        default_html_content = (
            'Try {{try_number}} out of {{max_tries + 1}}<br>'
            'Exception:<br>{{exception_html}}<br>'
            'Log: <a href="{{ti.log_url}}">Link</a><br>'
            'Host: {{ti.hostname}}<br>'
            'Log file: {{ti.log_filepath}}<br>'
            'Mark success: <a href="{{ti.mark_success_url}}">Link</a><br>'
        )

        def render(key, content):
            # A template file configured under the [email] section overrides
            # the built-in default content for that key.
            if configuration.has_option('email', key):
                path = configuration.get('email', key)
                with open(path) as f:
                    content = f.read()
            return jinja_env.from_string(content).render(**jinja_context)

        subject = render('subject_template', default_subject)
        html_content = render('html_content_template', default_html_content)
        send_email(self.task.email, subject, html_content)

    def set_duration(self):
        # Duration in seconds; left as None until both start and end are known.
        if self.end_date and self.start_date:
            self.duration = (self.end_date - self.start_date).total_seconds()
        else:
            self.duration = None

    def xcom_push(
            self,
            key,
            value,
            execution_date=None):
        """
        Make an XCom available for tasks to pull.

        :param key: A key for the XCom
        :type key: str
        :param value: A value for the XCom. The value is pickled and stored
            in the database.
        :type value: any pickleable object
        :param execution_date: if provided, the XCom will not be visible until
            this date. This can be used, for example, to send a message to a
            task on a future date without it being immediately visible.
        :type execution_date: datetime
        """
        # Disallow back-dating: an XCom dated before this run's execution_date
        # would retroactively appear visible in the past.
        if execution_date and execution_date < self.execution_date:
            raise ValueError(
                'execution_date can not be in the past (current '
                'execution_date is {}; received {})'.format(
                    self.execution_date,
                    execution_date))

        XCom.set(
            key=key,
            value=value,
            task_id=self.task_id,
            dag_id=self.dag_id,
            execution_date=execution_date or self.execution_date)

    def xcom_pull(
            self,
            task_ids=None,
            dag_id=None,
            key=XCOM_RETURN_KEY,
            include_prior_dates=False):
        """
        Pull XComs that optionally meet certain criteria.

        The default value for `key` limits the search to XComs
        that were returned by other tasks (as opposed to those that were pushed
        manually). To remove this filter, pass key=None (or any desired value).

        If a single task_id string is provided, the result is the value of the
        most recent matching XCom from that task_id. If multiple task_ids are
        provided, a tuple of matching values is returned. None is returned
        whenever no matches are found.

        :param key: A key for the XCom. If provided, only XComs with matching
            keys will be returned. The default key is 'return_value', also
            available as a constant XCOM_RETURN_KEY. This key is automatically
            given to XComs returned by tasks (as opposed to being pushed
            manually). To remove the filter, pass key=None.
        :type key: str
        :param task_ids: Only XComs from tasks with matching ids will be
            pulled. Can pass None to remove the filter.
        :type task_ids: str or iterable of strings (representing task_ids)
        :param dag_id: If provided, only pulls XComs from this DAG.
            If None (default), the DAG of the calling task is used.
        :type dag_id: str
        :param include_prior_dates: If False, only XComs from the current
            execution_date are returned. If True, XComs from previous dates
            are returned as well.
        :type include_prior_dates: bool
        """
        if dag_id is None:
            dag_id = self.dag_id

        pull_fn = functools.partial(
            XCom.get_one,
            execution_date=self.execution_date,
            key=key,
            dag_id=dag_id,
            include_prior_dates=include_prior_dates)

        if is_container(task_ids):
            # Multiple task ids: one value per id, in the same order.
            return tuple(pull_fn(task_id=t) for t in task_ids)
        else:
            return pull_fn(task_id=task_ids)

    @provide_session
    def get_num_running_task_instances(self, session):
        # Count RUNNING instances of this (dag_id, task_id) across all
        # execution dates, not just this one.
        TI = TaskInstance
        return session.query(TI).filter(
            TI.dag_id == self.dag_id,
            TI.task_id == self.task_id,
            TI.state == State.RUNNING
        ).count()

    def init_run_context(self, raw=False):
        """
        Sets the log context.
        """
        self.raw = raw
        self._set_context(self)


@functools.total_ordering
class BaseOperator(LoggingMixin):
    """
    Abstract base class for all operators. Since operators create objects that
    become nodes in the dag, BaseOperator contains many recursive methods for
    dag crawling behavior. To derive this class, you are expected to override
    the constructor as well as the 'execute' method.

    Operators derived from this class should perform or trigger certain tasks
    synchronously (wait for completion). Example of operators could be an
    operator that runs a Pig job (PigOperator), a sensor operator that
    waits for a partition to land in Hive (HiveSensorOperator), or one that
    moves data from Hive to MySQL (Hive2MySqlOperator). Instances of these
    operators (tasks) target specific operations, running specific scripts,
    functions or data transfers.

    This class is abstract and shouldn't be instantiated. Instantiating a
    class derived from this one results in the creation of a task object,
    which ultimately becomes a node in DAG objects. Task dependencies should
    be set by using the set_upstream and/or set_downstream methods.
:param task_id: a unique, meaningful id for the task :type task_id: str :param owner: the owner of the task, using the unix username is recommended :type owner: str :param retries: the number of retries that should be performed before failing the task :type retries: int :param retry_delay: delay between retries :type retry_delay: datetime.timedelta :param retry_exponential_backoff: allow progressive longer waits between retries by using exponential backoff algorithm on retry delay (delay will be converted into seconds) :type retry_exponential_backoff: bool :param max_retry_delay: maximum delay interval between retries :type max_retry_delay: datetime.timedelta :param start_date: The ``start_date`` for the task, determines the ``execution_date`` for the first task instance. The best practice is to have the start_date rounded to your DAG's ``schedule_interval``. Daily jobs have their start_date some day at 00:00:00, hourly jobs have their start_date at 00:00 of a specific hour. Note that Airflow simply looks at the latest ``execution_date`` and adds the ``schedule_interval`` to determine the next ``execution_date``. It is also very important to note that different tasks' dependencies need to line up in time. If task A depends on task B and their start_date are offset in a way that their execution_date don't line up, A's dependencies will never be met. If you are looking to delay a task, for example running a daily task at 2AM, look into the ``TimeSensor`` and ``TimeDeltaSensor``. We advise against using dynamic ``start_date`` and recommend using fixed ones. Read the FAQ entry about start_date for more information. :type start_date: datetime.datetime :param end_date: if specified, the scheduler won't go beyond this date :type end_date: datetime.datetime :param depends_on_past: when set to true, task instances will run sequentially while relying on the previous task's schedule to succeed. The task instance for the
<reponame>sedlakovi/mafTools ################################################## # Copyright (C) 2013 by # <NAME> (<EMAIL>, <EMAIL>) # ... and other members of the Reconstruction Team of <NAME>'s # lab (BME Dept. UCSC). # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
################################################## import os import random import sys import unittest sys.path.append(os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../lib/'))) import mafToolsTest as mtt g_headers = ['''##maf version=1 scoring=tba.v8 # tba.v8 (((human chimp) baboon) (mouse rat)) ''', '''##maf version=1 scoring=tba.v8 # tba.v8 (((human chimp) baboon) (mouse rat)) '''] g_duplicateBlocks = [('''a score=0 #dup block 1, name4 is duplicate s target.chr0 38 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 gcagctgaaaaca s name.chr1 0 13 + 100 gcagctgaaaaca s name2.chr1 50 13 + 100 gcagctgaaaaca s name3.chr9 50 13 + 100 gcagctgaaaaca s name4.chr& 50 13 + 100 gcagctgaaaaca s name4.chrA 50 13 + 100 gcagctgaaaacT ''', '''a score=0 #dup block 1, name4 is duplicate s target.chr0 38 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 gcagctgaaaaca s name.chr1 0 13 + 100 gcagctgaaaaca s name2.chr1 50 13 + 100 gcagctgaaaaca s name3.chr9 50 13 + 100 gcagctgaaaaca s name4.chr& 50 13 + 100 gcagctgaaaaca ''',), ('''a score=0 #dup block 2, target is duplicate s name 0 13 + 100 gcagctgaaaaca s name2.chr1 50 13 + 100 gcagctgaaaaca s name3.chr9 50 13 + 100 gcagctgaaaaca s name4.chr& 50 13 + 100 gcagctgaaaaca s name5 50 13 + 100 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon 249182 13 + 4622798 gcagctgaaaaca s target.chr0 158545457 13 - 158545518 gcagctgaaaacT s target.chr1 158545457 13 - 158545518 gcagctgaaaaca ''', '''a score=0 #dup block 2, target is duplicate s name 0 13 + 100 gcagctgaaaaca s name2.chr1 50 13 + 100 gcagctgaaaaca s name3.chr9 50 13 + 100 gcagctgaaaaca s name4.chr& 50 13 + 100 gcagctgaaaaca s name5 50 13 + 100 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon 249182 13 + 
4622798 gcagctgaaaaca s target.chr1 158545457 13 - 158545518 gcagctgaaaaca ''',), ('''a score=0 #dup block 3, panTro1 and baboon are duplicates s name 10 13 + 100 gcagctgaaaaca s name2.chr1 50 13 + 100 gcagctgaaaaca s name3.chr9 50 13 + 100 gcagctgaaaaca s name4.chr& 50 13 + 100 gcagctgaaaaca s name5 50 13 + 100 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 Acagctgaatact s target.chr0 62 9 + 158545518 gca---gaa-aca s baboon 249182 13 + 4622798 gcagctgaaaaca s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr7 28869787 13 + 161576975 gcagctgaatact s baboon 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA ''', '''a score=0 #dup block 3, panTro1 and baboon are duplicates s name 10 13 + 100 gcagctgaaaaca s name2.chr1 50 13 + 100 gcagctgaaaaca s name3.chr9 50 13 + 100 gcagctgaaaaca s name4.chr& 50 13 + 100 gcagctgaaaaca s name5 50 13 + 100 gcagctgaaaaca s target.chr0 62 9 + 158545518 gca---gaa-aca s baboon 249182 13 + 4622798 gcagctgaaaaca s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr7 28869787 13 + 161576975 gcagctgaatact s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA ''',), ('''a score=0 #dup block 4, name, panTro1 and baboon are duplicates s name 10 13 + 100 gcagctgaaaaca s name.chr1 50 13 + 100 gcagctgaaaact s name.chr2 50 13 + 100 gcagctgaaaact s name3.chr9 50 13 + 100 gcagctgaaaaca s name4.chr& 50 13 + 100 gcagctgaaaaca s name5 50 13 + 100 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 Acagctgaatact s target.chr0 62 9 + 158545518 gca---gaa-aca s baboon 249182 13 + 4622798 gcagctgaaaaca s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr7 28869787 13 + 161576975 gcagctgaatacT s baboon 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA ''', '''a score=0 #dup block 4, name, panTro1 and baboon are duplicates s name 10 13 + 100 gcagctgaaaaca s name3.chr9 50 13 + 100 gcagctgaaaaca s name4.chr& 50 13 + 100 gcagctgaaaaca s name5 50 13 + 100 
gcagctgaaaaca s target.chr0 62 9 + 158545518 gca---gaa-aca s baboon 249182 13 + 4622798 gcagctgaaaaca s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr7 28869787 13 + 161576975 gcagctgaatacT s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA ''',), ('''a score=0 #dup block 1, name4 is duplicate s target.chr0 38 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 gcagctgannnnn s name.chr1 0 13 + 100 gcagctgaaaacN s name2.chr1 50 13 + 100 gcagctgaaaacN s name3.chr9 50 13 + 100 gcagctgaaaaca s name4.chr& 50 13 + 100 gcagctgaaaaca s name4.chrA 50 13 + 100 gcagctgaaaacT ''', '''a score=0 #dup block 1, name4 is duplicate s target.chr0 38 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 gcagctgannnnn s name.chr1 0 13 + 100 gcagctgaaaacN s name2.chr1 50 13 + 100 gcagctgaaaacN s name3.chr9 50 13 + 100 gcagctgaaaaca s name4.chr& 50 13 + 100 gcagctgaaaaca ''',), ] g_nonDuplicateBlocks = ['''a score=23262.0 #non-dup block 1 s hg18.chr7 27578828 38 + 158545518 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG s panTro1.chr6 28741140 38 + 161576975 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG s baboon 116834 38 + 4622798 AAA-GGGAATGTTAACCAAATGA---GTTGTCTCTTATGGTG s mm4.chr6 53215344 38 + 151104725 -AATGGGAATGTTAAGCAAACGA---ATTGTCTCTCAGTGTG s rn3.chr4 81344243 40 + 187371129 -AA-GGGGATGCTAAGCCAATGAGTTGTTGTCTCTCAATGTG ''', '''a score=5062.0 #non-dup block 2 s hg18.chr7 27699739 6 + 158545518 TAAAGA s panTro1.chr6 28862317 6 + 161576975 TAAAGA s baboon 241163 6 + 4622798 TAAAGA # ignore this comment line s mm4.chr6 53303881 6 + 151104725 TAAAGA s rn3.chr4 81444246 6 + 187371129 taagga q i dont remember what q lines do, but we should be ignoring them. 
''', '''a score=6636.0 #non-dup block 3 # this comment line should not screw anything up s hg18.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA # nor should this comment line. ''', ] def mafIsFiltered(maf, blockList): f = open(maf) lastLine = mtt.processHeader(f) for i in xrange(0, len(blockList)): # walk through the maf, assessing the equivalence to the blockList items b = mtt.extractBlockStr(f, lastLine) lastLine = None if b != blockList[i]: print 'dang' print 'observed:' print b print '!=' print 'expected:' print blockList[i] return False return True class DuplicationFilterTest(unittest.TestCase): def testFilter(self): """ mafDuplicateFilter should filter out duplicates in blocks according to sequence similarity to the consensus. """ mtt.makeTempDirParent() for i in xrange(0, 10): shuffledBlocks = [] expectedOutput = [] tmpDir = os.path.abspath(mtt.makeTempDir('filter')) order = [1] * len(g_duplicateBlocks) + [0] * len(g_nonDuplicateBlocks) random.shuffle(order) random.shuffle(g_duplicateBlocks) random.shuffle(g_nonDuplicateBlocks) j, k = 0, 0 for dupBlock in order: if dupBlock: shuffledBlocks.append(g_duplicateBlocks[j][0]) expectedOutput.append(g_duplicateBlocks[j][1]) j += 1 else: shuffledBlocks.append(g_nonDuplicateBlocks[k]) expectedOutput.append(g_nonDuplicateBlocks[k]) k += 1 testMaf = mtt.testFile(os.path.abspath(os.path.join(tmpDir, 'test.maf')), ''.join(shuffledBlocks), g_headers) parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) cmd = [os.path.abspath(os.path.join(parent, 'test', 'mafDuplicateFilter')), '--maf', os.path.abspath(os.path.join(tmpDir, 'test.maf'))] outpipes = [os.path.abspath(os.path.join(tmpDir, 'filtered.maf'))] mtt.recordCommands([cmd], tmpDir, outPipes=outpipes) mtt.runCommandsS([cmd], tmpDir, outPipes=outpipes) self.assertTrue(mafIsFiltered(os.path.join(tmpDir, 
'filtered.maf'), expectedOutput)) mtt.removeDir(tmpDir) def testNonFilter(self): """ mafDuplicateFilter should not filter out any sequences from blocks when there are no duplicates. """ mtt.makeTempDirParent() for i in xrange(0, 10): tmpDir = os.path.abspath(mtt.makeTempDir('nonFilter')) random.shuffle(g_nonDuplicateBlocks) testMaf = mtt.testFile(os.path.abspath(os.path.join(tmpDir, 'test.maf')), ''.join(g_nonDuplicateBlocks), g_headers) expectedOutput = g_nonDuplicateBlocks parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) cmd = [os.path.abspath(os.path.join(parent, 'test', 'mafDuplicateFilter')), '--maf', os.path.abspath(os.path.join(tmpDir, 'test.maf'))] outpipes = [os.path.abspath(os.path.join(tmpDir, 'filtered.maf'))] mtt.recordCommands([cmd], tmpDir, outPipes=outpipes) mtt.runCommandsS([cmd], tmpDir, outPipes=outpipes) self.assertTrue(mafIsFiltered(os.path.join(tmpDir, 'filtered.maf'), expectedOutput)) mtt.removeDir(tmpDir) def testMemory1(self): """ If valgrind is installed on the system, check for memory related errors (1). """ mtt.makeTempDirParent() valgrind = mtt.which('valgrind') if valgrind is None: return for i in xrange(0, 10): shuffledBlocks = [] expectedOutput = [] tmpDir = os.path.abspath(mtt.makeTempDir('memory1')) order = [1] * len(g_duplicateBlocks) + [0] * len(g_nonDuplicateBlocks) random.shuffle(order) random.shuffle(g_duplicateBlocks) random.shuffle(g_nonDuplicateBlocks) j, k = 0, 0 for dupBlock in order: if dupBlock: shuffledBlocks.append(g_duplicateBlocks[j][0]) expectedOutput.append(g_duplicateBlocks[j][1]) j += 1
= task_df.apply(compute_surprise_bits, axis=1) if len(surprise_series) == 0: surprise_series[(task_group_models[0].group_def,)] = [numpy.NaN] * len(task_df) return pandas.DataFrame(surprise_series) def impute_missing_cols(features_df, value=0.0): features_df = features_df.copy() for col in features_df: if features_df[col].isnull().all(): features_df[col].fillna(value, inplace=True) return features_df def build_pca_df(features_df, *, n_components="mle", fig=None): # DRAGONS - the imputer works well, but removes features entirely when there's no data. We # don't want this - we just the PCA to discard them since otherwise it screws up our # dimensionalities of components later on. features_df = impute_missing_cols(features_df) features = features_df.values features = SimpleImputer(missing_values=numpy.NaN, strategy="median").fit_transform(features) # features = StandardScaler().fit_transform(features) pca = PCA(n_components=n_components, whiten=True) metrics = pca.fit_transform(features) metrics_df = pandas.DataFrame( metrics, index=features_df.index, columns=[f"pca{i}" for i in range(0, len(metrics[0]))] ) metrics_df.pca = pca if fig: title = None if isinstance(fig, str): title = fig fig = pyplot.figure() if isinstance(fig, bool): title = "PCA Values" fig = pyplot.figure() axs = fig.add_subplot(1, 1, 1) axs.scatter(metrics_df.iloc[:, 0], metrics_df.iloc[:, 1]) # zip joins x and y coordinates in pairs for i, (x, y) in enumerate(zip(metrics_df.iloc[:, 0], metrics_df.iloc[:, 1])): # this method is called for each point axs.annotate( metrics_df.index[i], # this is the text (x, y), # this is the point to label textcoords="offset points", # how to position the text xytext=(0, 10), # distance from text to points (x,y) ha="center", ) # horizontal alignment can be left, right or center axs.set_title(title + " " if title else "") axs.set_ylabel(metrics_df.columns[0]) axs.set_xlabel(metrics_df.columns[1]) fig.set_size_inches(8, 8) metrics_df.fig = fig return metrics_df # HACK 
since we don't really have a nice way to convert back to a probability density from a percentile
def est_logpdf_for_percentile(model, outer_pct):
    # Monte-Carlo estimate: draw samples from the model, score each sample's
    # log-density, then take the outer_pct percentile of those scores.
    # Fewer samples suffice in the univariate case.
    n_samples = 100 * 1000 if len(model.mean) > 1 else 1000
    logpdf_for_percentile = numpy.percentile(model.logpdf(model.rvs(n_samples)), outer_pct * 100)
    return logpdf_for_percentile


def build_mv_normality_model(metrics_df, *, outlier_pct=0.0, fig=None, fig_pct_cutoffs=[1, 50, 99]):
    # NOTE(review): mutable default argument (fig_pct_cutoffs); harmless here
    # since it is only iterated, but a tuple would be safer.
    #
    # Two-pass fit: fit a gaussian to all metrics, drop the outlier_pct
    # least-likely rows, then refit on the remainder.
    metrics = metrics_df.values
    mean = numpy.mean(metrics, axis=0)
    cov = numpy.cov(metrics, rowvar=0) if len(metrics_df) > 1 else [1.0]
    dist_model = multivariate_normal(mean, cov, allow_singular=True)

    # With <= 3 rows, skip outlier removal entirely (percentile is meaningless).
    outlier_logpdf_cutoff = est_logpdf_for_percentile(
        dist_model, outlier_pct if len(metrics_df) > 3 else 0.0
    )
    # TODO: Figure out how to compute pdf boundary directly for N% of results
    outlier_test = pandas.Series([(dist_model.logpdf(m) < outlier_logpdf_cutoff) for m in metrics])
    nonoutliers_df = metrics_df.iloc[list(~outlier_test), :]
    outliers_df = metrics_df.iloc[list(outlier_test), :]

    # Second fit on the non-outliers only; keep both partitions on the model
    # object for callers.
    nonoutliers = nonoutliers_df.values
    mean = numpy.mean(nonoutliers, axis=0)
    cov = numpy.cov(nonoutliers, rowvar=0) if len(metrics_df) > 1 else 1.0
    normal_model = multivariate_normal(mean, cov, allow_singular=True)
    normal_model.nonoutliers = nonoutliers_df
    normal_model.outliers = outliers_df

    if fig:
        # fig may be True (auto title), a str (used as title), or an existing figure.
        title = None
        if isinstance(fig, str):
            title = fig
            fig = pyplot.figure()
        if isinstance(fig, bool):
            title = "Normality Model"
            fig = pyplot.figure()
        axs = fig.add_subplot(1, 1, 1)

        # Plot against the first two metric columns only.
        metrics_x, metrics_y = list(zip(*metrics_df.iloc[:, 0:2].values))
        mean_metrics_other = list(mean[2:])

        def pretty_range(metrics, n_steps=500, overfill=1.1):
            # Symmetric range around the data's center, padded by `overfill`.
            m_min, m_max = min(metrics), max(metrics)
            m_center = (m_max + m_min) / 2.0
            m_radius = abs((m_max - m_min) / 2.0) * overfill
            m_step = (m_radius * 2.0) / float(n_steps)
            m_min = m_center - m_radius
            return m_min, m_min + m_step * n_steps, m_step

        x_min, x_max, x_step = pretty_range(metrics_x)
        y_min, y_max, y_step = pretty_range(metrics_y)
        x, y = 
numpy.mgrid[x_min:x_max:x_step, y_min:y_max:y_step]
        x_y = numpy.dstack((x, y))

        def xy_to_metric(x, y):
            # Fix all non-plotted components at their mean so the 2-D contour
            # is a slice through the full-dimensional density.
            return tuple([x, y] + mean_metrics_other)

        metrics_x_y = numpy.apply_along_axis(lambda xy: xy_to_metric(*xy), -1, x_y)

        def mark_pdf_range(v_arr):
            # NOTE(review): `-1 if False else v` is a no-op pass-through --
            # presumably a leftover debugging hook; confirm before removing.
            return numpy.array(list(map(lambda v: -1 if False else v, v_arr)))

        def marked_pdf(metrics):
            return numpy.apply_along_axis(mark_pdf_range, -1, normal_model.pdf(metrics))

        values_x_y = marked_pdf(metrics_x_y)
        min_value = min(min(r) for r in values_x_y)
        max_value = max(max(r) for r in values_x_y)
        contours = axs.contourf(x, y, values_x_y, levels=numpy.linspace(min_value, max_value, 5))
        # axs.contour(contours, levels=[numpy.exp(outlier_logpdf_cutoff)], colors='red')
        # Overlay one contour per requested percentile cutoff.
        for fig_pct_cutoff in fig_pct_cutoffs:
            fig_logpdf_cutoff = est_logpdf_for_percentile(normal_model, fig_pct_cutoff)
            axs.contour(
                contours, levels=[numpy.exp(fig_logpdf_cutoff)], colors="orange", alpha=0.75
            )
        axs.scatter(metrics_x, metrics_y, alpha=0.5, color="white")
        axs.set_title(title + " " if title else "")
        # axs.clabel(contours, [outlier_pdf_cutoff, fig_pdf_cutoff], inline=True)
        axs.set_ylabel(metrics_df.columns[1])
        axs.set_xlabel(metrics_df.columns[0])
        fig.set_size_inches(8, 8)
        normal_model.fig = fig

    return normal_model


def impute_zero_sparse_rows(arr):
    # Rows whose NaN->0 sum is not positive get 1.0 added to every element,
    # preventing all-zero rows from collapsing downstream products.
    def impute_zero_sparse_row(row):
        if numpy.sum(numpy.nan_to_num(row)) > 0:
            return row
        return row + 1.0

    return numpy.apply_along_axis(impute_zero_sparse_row, 1, arr)


def build_feature_abnormality_df(features_df, metrics_df, normal_model):
    # Distance of each row from the fitted model's mean, in PCA metric space.
    abnormal_vs = metrics_df.values - normal_model.mean
    # Feature-space deviation from the PCA's own mean.
    feature_vs = features_df - metrics_df.pca.mean_
    # Compute the total proportion of abnormality distance that each feature vector component is responsible for
    feature_sensitivity = numpy.matmul(
        numpy.abs(abnormal_vs), numpy.abs(metrics_df.pca.components_)
    )
    feature_contributions = numpy.multiply(
        feature_sensitivity, numpy.nan_to_num(impute_zero_sparse_rows(abs(feature_vs)))
    )
    col_index = pandas.MultiIndex.from_tuples(
list(map(lambda t: t + ("value",), features_df.columns.to_flat_index())) + list(map(lambda t: t + ("abn_cont",), features_df.columns.to_flat_index())), names=features_df.columns.names + ("meta",), ) contribution_df = pandas.DataFrame(feature_contributions, index=metrics_df.index) abnormal_df = pandas.concat([features_df, contribution_df], axis=1) abnormal_df.columns = col_index return abnormal_df def build_abnormal_task_model_figures(task_df, elapsed_time_col, task_time_models, abnormal_df): abnormal_cols = list( filter( lambda mc: isinstance(mc, (list, tuple)) and mc[-1] == "abn_cont", abnormal_df.columns ) ) model_feature_map = {} for abnormal_col in abnormal_cols: group_def = abnormal_col[0] value_cols, abnormal_cols = model_feature_map.setdefault(group_def, ([], [])) value_cols.append(abnormal_col[0:-1] + ("value",)) abnormal_cols.append(abnormal_col) model_by_def = {} # model_value = {} model_abnormality = {} for model in task_time_models: model_by_def[model.group_def] = model if model.group_def not in model_feature_map: continue value_cols, abnormal_cols = model_feature_map[model.group_def] model_abnormality[model.group_def] = abnormal_df[abnormal_cols].sum(axis=1) # model_value[model.group_def] = abnormal_df[value_cols].sum(axis=1) model_abnormality_df = pandas.DataFrame(model_abnormality) model_abnormality_df.columns = model_abnormality_df.columns.to_flat_index() def to_tuple(v): if isinstance(v, tuple): return v if isinstance(v, list): return tuple(v) return (v,) class Comparisons: def __init__(self, task_group_def): self.task_group_def = task_group_def self.model_comparisons = [] task_group_cols = to_tuple(abnormal_df.index.names) all_comparisons = [] for i in range(0, len(abnormal_df)): task_group_def = tuple(zip(task_group_cols, to_tuple(abnormal_df.iloc[i, :].name))) comparisons = Comparisons(task_group_def) all_comparisons.append(comparisons) task_group_task_mask = None for col, value in task_group_def: next_mask = task_df[col] == value 
            task_group_task_mask = (
                task_group_task_mask & next_mask if task_group_task_mask is not None else next_mask
            )
        abnormality_values = model_abnormality_df.iloc[i, :].values
        # Most-abnormal models first.
        ordered_abnormality = sorted(
            zip(abnormality_values, list(model_abnormality_df.columns)), reverse=True
        )
        for abnormality, group_def in ordered_abnormality:
            model = model_by_def[group_def]
            # Mask selecting the rows of task_df belonging to this model.
            model_task_mask = None
            for col, value in model.group_def:
                next_mask = task_df[col] == value
                model_task_mask = (
                    model_task_mask & next_mask if model_task_mask is not None else next_mask
                )
            model_task_df = task_df.loc[task_group_task_mask & model_task_mask]
            if len(model_task_df) == 0:
                continue
            comparisons.model_comparisons.append((abnormality, model, model_task_df))
    max_abnormality = max(model_abnormality_df.max())
    active_models = list(filter(lambda m: m.group_def in model_feature_map, task_time_models))
    AbnormalFigures = namedtuple("AbnormalFigures", ["bullseye", "histogram"])
    all_figures = {}
    for ci, comparisons in enumerate(all_comparisons):
        def pretty_vals(group_def):
            # e.g. (('team', 'a'), ('user', 'b')) -> "a - b"
            return " - ".join(str(v) for k, v in group_def)

        def pretty_spoke(group_def):
            return "Task " + "\n".join(str(v) for k, v in group_def)

        # if pretty_vals(comparisons.task_group_def) != 'protiva':
        #     continue
        fig = pyplot.figure()
        comparison_map = dict(
            (comparison[1].group_def, comparison) for comparison in comparisons.model_comparisons
        )
        # One polar spoke per active model.
        theta = numpy.linspace(0, 2 * numpy.pi, len(active_models), endpoint=False)
        axs = fig.add_subplot(1, 1, 1, projection="polar")
        abnormality = numpy.array(
            list(
                map(
                    lambda m: comparison_map[m.group_def][0] if m.group_def in comparison_map else 0,
                    active_models,
                )
            )
        )
        # print("abnormality", abnormality)
        spoke_labels = list(map(lambda m: pretty_spoke(m.group_def), active_models))
        axs.set_thetagrids(numpy.degrees(theta), spoke_labels, weight="bold", size="medium")
        axs.tick_params(rotation="auto", pad=1.2)
        # axs.set_varlabels(spoke_labels, weight='bold', size='medium')
        # Opacity scales with abnormality; floor of 0.1 keeps points visible.
        alpha = (abnormality / max_abnormality) * 0.9 + 0.1
        # Stable per-model color derived from the group definition hash.
        rgb_colors = list(map(lambda m: colorhash.ColorHash(m.group_def).rgb, active_models))
        rgba_colors = numpy.zeros((len(abnormality), 4))
        rgba_colors[:, 0:3] = numpy.array(rgb_colors) / 255.0
        rgba_colors[:, 3] = alpha
        axs.scatter(
            theta,
            abnormality,
            c=rgba_colors,
            # Marker area also scales with abnormality.
            s=(((abnormality / max_abnormality) * 30.0) ** 2) * numpy.pi,
            marker="o",
        )
        axs.set_rlim(0, max_abnormality + 1.0)
        rgrids = numpy.arange(0, max_abnormality + 1.0, 1.0)
        axs.set_rgrids(rgrids, labels=[""] * len(rgrids))
        axs.set_title(
            f"Task Completion Time Normality for\n{pretty_vals(comparisons.task_group_def)}\n",
            weight="bold",
            size="large",
        )
        for spine in axs.spines.values():
            spine.set_edgecolor("lightgray")
        # fig.subplots_adjust(wspace=0.25, hspace=0.20, top=0.85, bottom=0.05)
        fig.set_size_inches(8, 8)
        bullseye = fig
        # Second figure: grid of per-model histograms with model pdf overlays.
        fig = pyplot.figure()
        cols = math.ceil(math.sqrt(len(active_models)))
        rows = math.ceil(len(active_models) / cols)
        # for i, comparison in enumerate(comparisons.model_comparisons):
        for i, model in enumerate(active_models):
            axs = fig.add_subplot(rows, cols, i + 1)
            axs_pdf = axs.twinx()
            comparison = comparison_map.get(model.group_def, None)
            if comparison is None:
                continue
            abnormality, model, model_task_df = comparison
            # binsize = model.event_resolution
            # bins = numpy.arange(0, max(model.ppf(0.99), max(model_task_df[elapsed_time_col])) + binsize, binsize)
            # Bin out to the 99th percentile of the model or the observed max.
            binrange = max(model.ppf(0.99), max(model_task_df[elapsed_time_col]))
            bins = numpy.linspace(0, binrange, 10)
            # bins_99 = model.ppf(numpy.linspace(0.01, 0.99, 99))
            # last_bin_size = bins_99[-1] - bins_99[-2]
            # bins = [0] + list(bins_99) + [max(bins_99[-1] + last_bin_size, max(model_task_df[elapsed_time_col]))]
            rgb_color = numpy.array(colorhash.ColorHash(model.group_def).rgb) / 255.0
            # axs.hist(model_task_df[elapsed_time_col], bins=bins, alpha=0.05, color='purple')
            axs.hist(
                model_task_df[elapsed_time_col],
                bins=bins,
                alpha=(abnormality / max_abnormality) * 0.9 + 0.1,
                color=rgb_color,
            )
            pdf_range = model.interval(0.99)
            pdf_x = 
numpy.linspace(pdf_range[0], pdf_range[1], 100)
            # Model pdf on a secondary axis; opacity reflects model confidence.
            axs_pdf.plot(
                pdf_x,
                model.pdf(pdf_x),
                alpha=(1.0 - model.expected_err_pct) * 0.9 + 0.1,
                color="black",
                linewidth=3.0,
            )
            axs.set_title(
                f"Task {pretty_vals(model.group_def)}\n{pretty_vals(comparisons.task_group_def)}\n(Ab:{abnormality:.2f} ErrPct:{model.expected_err_pct:.2f})\n"
            )
            axs.set_ylabel("# of Tasks Completed")
            axs_pdf.set_ylabel("Modeled Probability")
            axs.set_xlabel("Task Completion Time (mins)")
            # axs.set_xscale('log')
            # axs_pdf.set_xscale('log')
        fig.subplots_adjust(wspace=0.4, hspace=0.25)
        fig.set_size_inches(5 * cols, 10 * rows)
        histogram = fig
        _, task_group_index_values = zip(*comparisons.task_group_def)
        all_figures[maybe_tuple(task_group_index_values)] = AbnormalFigures(bullseye, histogram)
    return all_figures


def maybe_tuple(val):
    """Collapse a length-1 iterable to its single element, else a tuple."""
    val = tuple(val)
    if len(val) == 1:
        return val[0]
    return val


def to_tuple(val):
    """Wrap scalars in a tuple; pass lists/tuples through as tuples."""
    if isinstance(val, (tuple, list)):
        return tuple(val)
    return (val,)


def to_dt(val):
    # Recursively convert values (or lists of values) to pandas timestamps.
    if isinstance(val, (list, tuple)):
        return list(map(to_dt, val))
    return pandas.to_datetime(val,
bins, where each marginal bin contains the same number of samples. Then the marginal entropies have equal probable distributions H_x = H_y = log(bins) The calculation ranges are shown below:: (-------------------------total_time--------------------------) (---tau_max---)(---------corr_range------------)(---tau_max---) MI is calculated about corr_range and with the other time series shifted by tau Possible choices for lag_mode: - "all" will return the full function for all lags, possible large memory need if only_tri is True, only the upper triangle contains the values, the lower one is zeros - "sum" will return the sum over positive and negative lags seperatly, each inclunding tau=0 corrmat[0] is the positive sum, corrmat[1] the negative sum - "max" will return only the maximum coupling (in corrmat[0]) and its lag (in corrmat[1]) :arg int bins: number of bins for estimating MI :arg int tau_max: maximum lag in both directions, including last lag :arg str lag_mode: output mode :rtype: 3D numpy array (float) [index, index, index] :return: correlation matrix with different lag_mode choices """ if bins < 255: dtype = 'uint8' else: dtype = 'int16' # Normalize anomaly time series to zero mean and unit variance for all # lags, array contains normalizations for all lags corr_range = self.total_time - 2*tau_max # get the bin quantile steps bin_edge = numpy.ceil(corr_range/float(bins)) symbolic_array = numpy.empty((2*tau_max + 1, self.N, corr_range), dtype=dtype) for t in range(2*tau_max + 1): array = self.dataarray[:, t:t+corr_range] # get the lower edges of the bins for every time series edges = numpy.sort(array, axis=1)[:, ::bin_edge] bins = edges.shape[1] # This gives the symbolic time series symbolic_array[t] = \ (array.reshape(self.N, corr_range, 1) >= edges.reshape(self.N, 1, bins)).sum(axis=2) - 1 return self._calculate_mi(symbolic_array, corr_range=corr_range, bins=bins, tau_max=tau_max, lag_mode=lag_mode) def mutual_information_edges(self, bins=16, tau=0, 
lag_mode='all'): """ Returns the normalized mutual information from all pairs of nodes from a range of time lags. MI = H_x + H_y - H_xy Uses adaptive bins, where each marginal bin contains the same number of samples. Then the marginal entropies have equal probable distributions H_x = H_y = log(bins) The calculation ranges are shown below:: (-------------------------total_time--------------------------) (---tau_max---)(---------corr_range------------)(---tau_max---) MI is calculated about corr_range and with the other time series shifted by tau Possible choices for lag_mode: - "all" will return the full function for all lags, possible large memory need if only_tri is True, only the upper triangle contains the values, the lower one is zeros - "sum" will return the sum over positive and negative lags seperatly, each inclunding tau=0 corrmat[0] is the positive sum, corrmat[1] the negative sum - "max" will return only the maximum coupling (in corrmat[0]) and its lag (in corrmat[1]) :arg int bins: number of bins for estimating MI :arg int tau_max: maximum lag in both directions, including last lag :arg str lag_mode: output mode :rtype: 2D numpy array (float) [index, index] :return: bin edges for zero lag """ # get the bin quantile steps bin_edge = numpy.ceil(self.total_time/float(bins)) array = self.dataarray[:, :] array[:-tau, 1] = array[tau, 1] # get the lower edges of the bins for every time series edges = numpy.sort(array, axis=1)[:, ::bin_edge] bins = edges.shape[1] return edges def shuffled_surrogate_for_mi(self, fourier=False, bins=16, tau_max=0, lag_mode='all'): """ Returns a shuffled surrogate of normalized mutual information from all pairs of nodes from a range of time lags. 
:arg int bins: number of bins for estimating MI :arg int tau_max: maximum lag in both directions, including last lag :arg str lag_mode: output mode :rtype: 3D numpy array (float) [index, index, index] :return: correlation matrix with different lag_mode choices """ if bins < 255: dtype = 'uint8' else: dtype = 'int16' # Normalize anomaly time series to zero mean and unit variance for all # lags, array contains normalizations for all lags corr_range = self.total_time - 2*tau_max # Shuffle a copy of dataarray seperatly for each node array = numpy.copy(self.dataarray) if fourier: array = self.correlatedNoiseSurrogates(array) else: for i in range(self.N): numpy.random.shuffle(array[i]) # get the bin quantile steps bin_edge = numpy.ceil(corr_range/float(bins)) symbolic_array = numpy.empty((1, self.N, corr_range), dtype=dtype) array = array[:, :corr_range] # get the lower edges of the bins for every time series edges = numpy.sort(array, axis=1)[:, ::bin_edge] bins = edges.shape[1] # This gives the symbolic time series symbolic_array[0] = \ (array.reshape(self.N, corr_range, 1) >= edges.reshape(self.N, 1, bins)).sum(axis=2) - 1 res = self._calculate_mi(symbolic_array, corr_range=corr_range, bins=bins, tau_max=0, lag_mode='all') if lag_mode == 'all': corrmat = numpy.repeat(res, 2*tau_max + 1, axis=0) elif lag_mode == 'sum': corrmat = numpy.array([res[0], res[0]]) * (tau_max+1.) elif lag_mode == 'max': corrmat = numpy.array( [res[0], numpy.random.randint(-tau_max, tau_max+1, (self.N, self.N))]) return corrmat def time_surrogate_for_mi(self, bins=16, sample_range=100, tau_max=1, lag_mode='all'): """ Returns a joint shuffled surrogate of the full dataarray of length sample_range for all taus. Used for time evolution analysis. First one initializes the CouplingAnalysis class with the full dataarray and then this function is called for every single surrogate. 
:arg int sample_range: length of sample :arg int bins: number of bins for estimating MI :arg int tau_max: maximum lag in both directions, including last lag :arg str lag_mode: output mode :rtype: 3D numpy array (float) [index, index, index] :return: correlation matrix with different lag_mode choices """ if bins < 255: dtype = 'uint8' else: dtype = 'int16' perm = numpy.random.permutation( range(tau_max, self.total_time - tau_max))[:sample_range] # get the bin quantile steps bin_edge = numpy.ceil(sample_range/float(bins)) symbolic_array = numpy.empty((2*tau_max + 1, self.N, sample_range), dtype=dtype) for t in range(2*tau_max + 1): tau = t - tau_max array = self.dataarray[:, perm + tau] # get the lower edges of the bins for every time series edges = numpy.sort(array, axis=1)[:, ::bin_edge] bins = edges.shape[1] # This gives the symbolic time series symbolic_array[t] = \ (array.reshape(self.N, sample_range, 1) >= edges.reshape(self.N, 1, bins)).sum(axis=2) - 1 return self._calculate_mi(symbolic_array, corr_range=sample_range, bins=bins, tau_max=tau_max, lag_mode=lag_mode) def _calculate_mi(self, array, corr_range, bins, tau_max, lag_mode): """ Returns the mi matrix. 
        :arg int bins: number of bins for estimating MI
        :arg int tau_max: maximum lag in both directions, including last lag
        :arg str lag_mode: output mode
        :rtype: 3D numpy array (float) [index, index, index]
        :return: correlation matrix with different lag_mode choices
        """
        # lag_mode dict
        mode = self.lag_modi[lag_mode]
        only_tri = int(self.only_tri)

        # Initialize
        # Reused 2-D joint histogram; zeroed in-place after every (i, j, t).
        hist2D = numpy.zeros((bins, bins), dtype="int32")
        if lag_mode == 'all':
            corrmat = numpy.zeros((2*tau_max + 1, self.N, self.N),
                                  dtype='float32')
        elif lag_mode == 'sum':
            corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')
        elif lag_mode == 'max':
            corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')

        # Precalculation of the log
        # gfunc[t] = t*log(t), so entropy sums become table lookups.
        gfunc = numpy.zeros(corr_range+1)
        for t in range(1, corr_range + 1):
            gfunc[t] = t*numpy.log(t)

        # loop over all node pairs, NOT symmetric due to time shifts!
        for i in range(self.N-only_tri):
            for j in range((i+1)*only_tri, self.N):
                if mode == 2:
                    maxcross = 0.0
                    argmax = 0

                # loop over taus from -tau_max to tau_max INCLUDING the last
                # tau value
                for t in range(2*tau_max + 1):
                    tau = t - tau_max

                    # here the joint probability distribution is calculated
                    for k in range(corr_range):
                        indexi = array[tau_max, i, k]
                        indexj = array[t, j, k]
                        hist2D[indexi, indexj] += 1

                    # here the joint entropy is calculated by summing over all
                    # pattern combinations
                    jointent = 0.0
                    for l in range(bins):
                        for m in range(bins):
                            jointent -= gfunc[hist2D[l, m]]
                            # reset for the next lag while we're here
                            hist2D[l, m] = 0
                    jointent /= float(corr_range)
                    jointent += numpy.log(float(corr_range))

                    # Mutual Information is...
                    mi = 0.0
                    mi = 2. 
* numpy.log(bins) - jointent

                    # norm the mi
                    # With adaptive bins H_x = H_y = log(bins), so dividing by
                    # log(bins) normalizes MI into [0, 1].
                    mi /= numpy.log(bins)

                    # fill in values in matrix depending on lag_mode
                    if mode == 0:
                        corrmat[t, i, j] = mi
                    elif mode == 1:
                        if t <= tau_max:
                            corrmat[1, i, j] += mi
                        if t >= tau_max:
                            corrmat[0, i, j] += mi
                    elif mode == 2:
                        # calculate max and argmax by comparing to previous
                        # value and storing max
                        if mi > maxcross:
                            maxcross = mi
                            argmax = tau

                if mode == 2:
                    corrmat[0, i, j] = maxcross
                    corrmat[1, i, j] = argmax

        if self.only_tri:
            # Only the upper triangle was computed; mirror it into the lower
            # triangle (lags reversed for the 'all' mode).
            if lag_mode == 'all':
                corrmat = corrmat + corrmat.transpose(0, 2, 1)[::-1]
            if lag_mode == 'sum':
                corrmat[0] += corrmat[1].transpose()
'''Chats.
'''

from utils import *
from user import *
from errors import ISkypeError


class IChat(Cached):
    '''Represents a Skype chat.
    '''

    def __repr__(self):
        return '<%s with Name=%s>' % (Cached.__repr__(self)[1:-1], repr(self.Name))

    def _Alter(self, AlterName, Args=None):
        # Thin wrapper over the Skype ALTER CHAT protocol command.
        return self._Skype._Alter('CHAT', self._Name, AlterName, Args,
                                  'ALTER CHAT %s' % AlterName)

    def _Init(self, Name, Skype):
        self._Skype = Skype
        self._Name = Name

    def _Property(self, PropName, Value=None, Cache=True):
        # GET/SET a CHAT property via the Skype API layer.
        return self._Skype._Property('CHAT', self._Name, PropName, Value, Cache)

    def AcceptAdd(self):
        '''Accepts a shared group add request.
        '''
        self._Alter('ACCEPTADD')

    def AddMembers(self, *Members):
        '''Adds new members to the chat.

        @param Members: One or more users to add.
        @type Members: L{IUser}
        '''
        self._Alter('ADDMEMBERS', ', '.join([x.Handle for x in Members]))

    def Bookmark(self):
        '''Bookmarks the chat in Skype client.
        '''
        self._Alter('BOOKMARK')

    def ClearRecentMessages(self):
        '''Clears recent chat messages.
        '''
        self._Alter('CLEARRECENTMESSAGES')

    def Disband(self):
        '''Ends the chat.
        '''
        self._Alter('DISBAND')

    def EnterPassword(self, Password):
        '''Enters chat password.

        @param Password: Password
        @type Password: unicode
        '''
        self._Alter('ENTERPASSWORD', Password)

    def Join(self):
        '''Joins the chat.
        '''
        self._Alter('JOIN')

    def Kick(self, Handle):
        '''Kicks a member from chat.

        @param Handle: Handle
        @type Handle: unicode
        '''
        self._Alter('KICK', Handle)

    def KickBan(self, Handle):
        '''Kicks and bans a member from chat.

        @param Handle: Handle
        @type Handle: unicode
        '''
        self._Alter('KICKBAN', Handle)

    def Leave(self):
        '''Leaves the chat.
        '''
        self._Alter('LEAVE')

    def OpenWindow(self):
        '''Opens the chat window.
        '''
        self._Skype.Client.OpenDialog('CHAT', self._Name)

    def SendMessage(self, MessageText):
        '''Sends a chat message.
@param MessageText: Message text @type MessageText: unicode @return: Message object @rtype: L{IChatMessage} ''' return IChatMessage(chop(self._Skype._DoCommand('CHATMESSAGE %s %s' % (self._Name, MessageText)), 2)[1], self._Skype) def SetPassword(self, Password, Hint=''): '''Sets the chat password. @param Password: Password @type Password: unicode @param Hint: Password hint @type Hint: unicode ''' if ' ' in Password: raise ValueError('Password mut be one word') self._Alter('SETPASSWORD', '%<PASSWORD>' % (Password, Hint)) def Unbookmark(self): '''Unbookmarks the chat. ''' self._Alter('UNBOOKMARK') def _GetActiveMembers(self): return tuple([IUser(x, self._Skype) for x in esplit(self._Property('ACTIVEMEMBERS', Cache=False))]) ActiveMembers = property(_GetActiveMembers, doc='''Active members of a chat. @type: tuple of L{IUser} ''') def _GetActivityDatetime(self): from datetime import datetime return datetime.fromtimestamp(self.ActivityTimestamp) ActivityDatetime = property(_GetActivityDatetime, doc='''Returns chat activity timestamp as datetime. @type: datetime.datetime ''') def _GetActivityTimestamp(self): return float(self._Property('ACTIVITY_TIMESTAMP')) ActivityTimestamp = property(_GetActivityTimestamp, doc='''Returns chat activity timestamp. @type: float @see: L{ActivityDatetime} ''') def _GetAdder(self): return IUser(self._Property('ADDER'), self._Skype) Adder = property(_GetAdder, doc='''Returns the user that added current user to the chat. @type: L{IUser} ''') def _SetAlertString(self, value): self._Alter('SETALERTSTRING', quote('=%s' % value)) AlertString = property(fset=_SetAlertString, doc='''Chat alert string. Only messages containing words from this string will cause a notification to pop up on the screen. @type: unicode ''') def _GetApplicants(self): return tuple([IUser(x, self._Skype) for x in esplit(self._Property('APPLICANTS'))]) Applicants = property(_GetApplicants, doc='''Chat applicants. 
    @type: tuple of L{IUser}
    ''')

    def _GetBlob(self):
        return self._Property('BLOB')

    Blob = property(_GetBlob,
    doc='''Chat blob.

    @type: unicode
    ''')

    def _GetBookmarked(self):
        # Skype API returns 'TRUE'/'FALSE' strings; convert to bool here.
        return self._Property('BOOKMARKED') == 'TRUE'

    Bookmarked = property(_GetBookmarked,
    doc='''Tells if this chat is bookmarked.

    @type: bool
    ''')

    def _GetDatetime(self):
        from datetime import datetime
        return datetime.fromtimestamp(self.Timestamp)

    Datetime = property(_GetDatetime,
    doc='''Chat timestamp as datetime.

    @type: datetime.datetime
    ''')

    def _GetDescription(self):
        return self._Property('DESCRIPTION')

    def _SetDescription(self, value):
        self._Property('DESCRIPTION', value)

    Description = property(_GetDescription, _SetDescription,
    doc='''Chat description.

    @type: unicode
    ''')

    def _GetDialogPartner(self):
        return self._Property('DIALOG_PARTNER')

    DialogPartner = property(_GetDialogPartner,
    doc='''Skypename of the chat dialog partner.

    @type: unicode
    ''')

    def _GetFriendlyName(self):
        return self._Property('FRIENDLYNAME')

    FriendlyName = property(_GetFriendlyName,
    doc='''Friendly name of the chat.

    @type: unicode
    ''')

    def _GetGuideLines(self):
        return self._Property('GUIDELINES')

    def _SetGuideLines(self, value):
        # Guidelines are set via ALTER, not a plain property SET.
        self._Alter('SETGUIDELINES', value)

    GuideLines = property(_GetGuideLines, _SetGuideLines,
    doc='''Chat guidelines.

    @type: unicode
    ''')

    def _GetMemberObjects(self):
        return tuple([IChatMember(x, self._Skype) for x in esplit(self._Property('MEMBEROBJECTS'), ', ')])

    MemberObjects = property(_GetMemberObjects,
    doc='''Chat members as member objects.

    @type: tuple of L{IChatMember}
    ''')

    def _GetMembers(self):
        return tuple([IUser(x, self._Skype) for x in esplit(self._Property('MEMBERS'))])

    Members = property(_GetMembers,
    doc='''Chat members.

    @type: tuple of L{IUser}
    ''')

    def _GetMessages(self):
        # Message list is volatile; bypass the property cache.
        return tuple([IChatMessage(x, self._Skype) for x in esplit(self._Property('CHATMESSAGES', Cache=False), ', ')])

    Messages = property(_GetMessages,
    doc='''All chat messages.
    @type: tuple of L{IChatMessage}
    ''')

    def _GetMyRole(self):
        return self._Property('MYROLE')

    MyRole = property(_GetMyRole,
    doc='''My chat role in a public chat.

    @type: L{Chat member role<enums.chatMemberRoleUnknown>}
    ''')

    def _GetMyStatus(self):
        return self._Property('MYSTATUS')

    MyStatus = property(_GetMyStatus,
    doc='''My status in a public chat.

    @type: L{My chat status<enums.chatStatusUnknown>}
    ''')

    def _GetName(self):
        return self._Name

    Name = property(_GetName,
    doc='''Chat name as used by Skype to identify this chat.

    @type: unicode
    ''')

    def _GetOptions(self):
        return int(self._Property('OPTIONS'))

    def _SetOptions(self, value):
        # Options are set via ALTER, not a plain property SET.
        self._Alter('SETOPTIONS', value)

    Options = property(_GetOptions, _SetOptions,
    doc='''Chat options. A mask.

    @type: L{Chat options<enums.chatOptionJoiningEnabled>}
    ''')

    def _GetPasswordHint(self):
        return self._Property('PASSWORDHINT')

    PasswordHint = property(_GetPasswordHint,
    doc='''Chat password hint.

    @type: unicode
    ''')

    def _GetPosters(self):
        return tuple([IUser(x, self._Skype) for x in esplit(self._Property('POSTERS'))])

    Posters = property(_GetPosters,
    doc='''Users who have posted messages to this chat.

    @type: tuple of L{IUser}
    ''')

    def _GetRecentMessages(self):
        # Recent message list is volatile; bypass the property cache.
        return tuple([IChatMessage(x, self._Skype) for x in esplit(self._Property('RECENTCHATMESSAGES', Cache=False), ', ')])

    RecentMessages = property(_GetRecentMessages,
    doc='''Most recent chat messages.

    @type: tuple of L{IChatMessage}
    ''')

    def _GetStatus(self):
        return self._Property('STATUS')

    Status = property(_GetStatus,
    doc='''Status.

    @type: L{Chat status<enums.chsUnknown>}
    ''')

    def _GetTimestamp(self):
        return float(self._Property('TIMESTAMP'))

    Timestamp = property(_GetTimestamp,
    doc='''Chat timestamp.
    @type: float
    @see: L{Datetime}
    ''')

    def _GetTopic(self):
        # Prefer the XML topic when the running client supports it; fall back
        # to the plain-text TOPIC property on older clients.
        try:
            topicxml = self._Property('TOPICXML')
            if topicxml:
                return topicxml
        except ISkypeError:
            pass
        return self._Property('TOPIC')

    def _SetTopic(self, value):
        # Same fallback strategy as _GetTopic, via ALTER commands.
        try:
            self._Alter('SETTOPICXML', value)
        except ISkypeError:
            self._Alter('SETTOPIC', value)

    Topic = property(_GetTopic, _SetTopic,
    doc='''Chat topic.

    @type: unicode
    ''')

    def _GetTopicXML(self):
        return self._Property('TOPICXML')

    def _SetTopicXML(self, value):
        self._Property('TOPICXML', value)

    TopicXML = property(_GetTopicXML, _SetTopicXML,
    doc='''Chat topic in XML format.

    @type: unicode
    ''')

    def _GetType(self):
        return self._Property('TYPE')

    Type = property(_GetType,
    doc='''Chat type.

    @type: L{Chat type<enums.chatTypeUnknown>}
    ''')


class IChatMessage(Cached):
    '''Represents a single chat message.
    '''

    def __repr__(self):
        return '<%s with Id=%s>' % (Cached.__repr__(self)[1:-1], repr(self.Id))

    def _Init(self, Id, Skype):
        self._Skype = Skype
        self._Id = int(Id)

    def _Property(self, PropName, Value=None, Cache=True):
        # GET/SET a CHATMESSAGE property via the Skype API layer.
        return self._Skype._Property('CHATMESSAGE', self._Id, PropName, Value, Cache)

    def MarkAsSeen(self):
        '''Marks a missed chat message as seen.
        '''
        self._Skype._DoCommand('SET CHATMESSAGE %d SEEN' % self._Id,
                               'CHATMESSAGE %d STATUS READ' % self._Id)

    def _GetBody(self):
        return self._Property('BODY')

    def _SetBody(self, value):
        self._Property('BODY', value)

    Body = property(_GetBody, _SetBody,
    doc='''Chat message body.

    @type: unicode
    ''')

    def _GetChat(self):
        return IChat(self.ChatName, self._Skype)

    Chat = property(_GetChat,
    doc='''Chat this message was posted on.

    @type: L{IChat}
    ''')

    def _GetChatName(self):
        return self._Property('CHATNAME')

    ChatName = property(_GetChatName,
    doc='''Name of the chat this message was posted on.

    @type: unicode
    ''')

    def _GetDatetime(self):
        from datetime import datetime
        return datetime.fromtimestamp(self.Timestamp)

    Datetime = property(_GetDatetime,
    doc='''Chat message timestamp as datetime.
    @type: datetime.datetime
    ''')

    def _GetEditedBy(self):
        return self._Property('EDITED_BY')

    EditedBy = property(_GetEditedBy,
    doc='''Skypename of the user who edited this message.

    @type: unicode
    ''')

    def _GetEditedDatetime(self):
        from datetime import datetime
        return datetime.fromtimestamp(self.EditedTimestamp)

    EditedDatetime = property(_GetEditedDatetime,
    doc='''Message editing timestamp as datetime.

    @type: datetime.datetime
    ''')

    def _GetEditedTimestamp(self):
        return float(self._Property('EDITED_TIMESTAMP'))

    EditedTimestamp = property(_GetEditedTimestamp,
    doc='''Message editing timestamp.

    @type: float
    ''')

    def _GetFromDisplayName(self):
        return self._Property('FROM_DISPNAME')

    FromDisplayName = property(_GetFromDisplayName,
    doc='''DisplayName of the message sender.

    @type: unicode
    ''')

    def _GetFromHandle(self):
        return self._Property('FROM_HANDLE')

    FromHandle = property(_GetFromHandle,
    doc='''Skypename of the message sender.

    @type: unicode
    ''')

    def _GetId(self):
        return self._Id

    Id = property(_GetId,
    doc='''Chat message Id.

    @type: int
    ''')

    def _GetIsEditable(self):
        # Skype API returns 'TRUE'/'FALSE' strings; convert to bool here.
        return self._Property('IS_EDITABLE') == 'TRUE'

    IsEditable = property(_GetIsEditable,
    doc='''Tells if message body is editable.

    @type: bool
    ''')

    def _GetLeaveReason(self):
        return self._Property('LEAVEREASON')

    LeaveReason = property(_GetLeaveReason,
    doc='''LeaveReason.

    @type: L{Chat leave reason<enums.leaUnknown>}
    ''')

    def _SetSeen(self, value):
        # Deprecated setter kept for backwards compatibility; delegates to
        # MarkAsSeen() and refuses a False value.
        from warnings import warn
        warn('IChatMessage.Seen = x: Use IChatMessage.MarkAsSeen() instead.', DeprecationWarning, stacklevel=2)
        if value:
            self.MarkAsSeen()
        else:
            raise ISkypeError(0, 'Seen can only be set to True')

    Seen = property(fset=_SetSeen,
    doc='''Marks a missed chat message as seen.

    @type: bool
    @deprecated: Unpythonic, use L{MarkAsSeen} instead.
    ''')

    def _GetSender(self):
        return IUser(self.FromHandle, self._Skype)

    Sender = property(_GetSender,
    doc='''Sender of the chat message.
@type: L{IUser} ''') def _GetStatus(self): return self._Property('STATUS') Status = property(_GetStatus, doc='''Status of the chat messsage. @type: L{Chat message status<enums.cmsUnknown>} ''') def _GetTimestamp(self): return float(self._Property('TIMESTAMP')) Timestamp = property(_GetTimestamp, doc='''Chat message timestamp. @type: float @see: L{Datetime} ''') def _GetType(self): return self._Property('TYPE') Type = property(_GetType, doc='''Type of chat message. @type: L{Chat message type<enums.cmeUnknown>} ''') def _GetUsers(self): return tuple([IUser(self._Skype, x) for x in esplit(self._Property('USERS'))]) Users = property(_GetUsers, doc='''Users added to the chat. @type: tuple of L{IUser} ''') class IChatMember(Cached): '''Represents a member of a public chat. ''' def __repr__(self): return '<%s with Id=%s>' % (Cached.__repr__(self)[1:-1], repr(self.Id)) def _Alter(self, AlterName, Args=None): return self._Skype._Alter('CHATMEMBER', self._Id, AlterName, Args, 'ALTER CHATMEMBER %s' % AlterName) def _Init(self, Id, Skype): self._Skype = Skype self._Id = int(Id) def _Property(self, PropName, Value=None, Cache=True): return self._Skype._Property('CHATMEMBER', self._Id, PropName, Value, Cache) def CanSetRoleTo(self, Role): '''Checks if the new role can be applied to the member. @param Role: New chat member role.
is None:
        ec2 = module.client('ec2')
    spec = dict(
        ClientToken=uuid.uuid4().hex,  # idempotency token per module run
        MaxCount=1,
        MinCount=1,
    )
    # network parameters
    spec['NetworkInterfaces'] = build_network_spec(params, ec2)
    spec['BlockDeviceMappings'] = build_volume_spec(params)
    spec.update(**build_top_level_options(params))
    spec['TagSpecifications'] = build_instance_tags(params)

    # IAM profile
    if params.get('instance_role'):
        spec['IamInstanceProfile'] = dict(Arn=determine_iam_role(params.get('instance_role')))

    spec['InstanceType'] = params['instance_type']
    return spec


def await_instances(ids, state='OK'):
    # Block until the given instance ids reach `state`, honoring the module's
    # wait/wait_timeout parameters. `state` must be a key of state_opts.
    if not module.params.get('wait', True):
        # the user asked not to wait for anything
        return
    # Map module-level state names to boto3 waiter names.
    state_opts = {
        'OK': 'instance_status_ok',
        'STOPPED': 'instance_stopped',
        'TERMINATED': 'instance_terminated',
        'EXISTS': 'instance_exists',
        'RUNNING': 'instance_running',
    }
    if state not in state_opts:
        module.fail_json(msg="Cannot wait for state {0}, invalid state".format(state))
    waiter = module.client('ec2').get_waiter(state_opts[state])
    try:
        waiter.wait(
            InstanceIds=ids,
            WaiterConfig={
                # Poll every 15s up to the user's wait_timeout (default 600s).
                'Delay': 15,
                'MaxAttempts': module.params.get('wait_timeout', 600) // 15,
            }
        )
    except botocore.exceptions.WaiterConfigError as e:
        module.fail_json(msg="{0}. Error waiting for instances {1} to reach state {2}".format(
            to_native(e), ', '.join(ids), state))
    except botocore.exceptions.WaiterError as e:
        # Timeout is only a warning — callers may still proceed.
        module.warn("Instances {0} took too long to reach state {1}. 
{2}".format(
            ', '.join(ids), state, to_native(e)))


def diff_instance_and_params(instance, params, ec2=None, skip=None):
    """boto3 instance obj, module params

    Returns a list of modify_instance_attribute argument dicts for every
    mutable attribute whose current value differs from the module params.
    """
    if ec2 is None:
        ec2 = module.client('ec2')
    if skip is None:
        skip = []

    changes_to_apply = []
    id_ = instance['InstanceId']

    # (module param name, boto3 attribute key, API attribute name, wrapper)
    ParamMapper = namedtuple('ParamMapper', ['param_key', 'instance_key', 'attribute_name', 'add_value'])

    def value_wrapper(v):
        return {'Value': v}

    param_mappings = [
        ParamMapper('ebs_optimized', 'EbsOptimized', 'ebsOptimized', value_wrapper),
        ParamMapper('termination_protection', 'DisableApiTermination', 'disableApiTermination', value_wrapper),
        # user data is an immutable property
        # ParamMapper('user_data', 'UserData', 'userData', value_wrapper),
    ]

    for mapping in param_mappings:
        if params.get(mapping.param_key) is not None and mapping.instance_key not in skip:
            # Fetch the live attribute value and compare against the param.
            value = ec2.describe_instance_attribute(Attribute=mapping.attribute_name, InstanceId=id_)
            if params.get(mapping.param_key) is not None and value[mapping.instance_key]['Value'] != params.get(mapping.param_key):
                arguments = dict(
                    InstanceId=instance['InstanceId'],
                    # Attribute=mapping.attribute_name,
                )
                arguments[mapping.instance_key] = mapping.add_value(params.get(mapping.param_key))
                changes_to_apply.append(arguments)

    if (params.get('network') or {}).get('source_dest_check') is not None:
        # network.source_dest_check is nested, so needs to be treated separately
        check = bool(params.get('network').get('source_dest_check'))
        if instance['SourceDestCheck'] != check:
            changes_to_apply.append(dict(
                InstanceId=instance['InstanceId'],
                SourceDestCheck={'Value': check},
            ))

    return changes_to_apply


def change_network_attachments(instance, params, ec2):
    # Attach any requested ENIs that are not already attached; returns True
    # when at least one attachment call was made.
    if (params.get('network') or {}).get('interfaces') is not None:
        new_ids = []
        for inty in params.get('network').get('interfaces'):
            if isinstance(inty, dict) and 'id' in inty:
                new_ids.append(inty['id'])
            elif isinstance(inty, string_types):
                new_ids.append(inty)
        # network.interfaces can create the need to 
attach new interfaces old_ids = [inty['NetworkInterfaceId'] for inty in instance['NetworkInterfaces']] to_attach = set(new_ids) - set(old_ids) for eni_id in to_attach: ec2.attach_network_interface( DeviceIndex=new_ids.index(eni_id), InstanceId=instance['InstanceId'], NetworkInterfaceId=eni_id, ) return bool(len(to_attach)) return False def find_instances(ec2, ids=None, filters=None): paginator = ec2.get_paginator('describe_instances') if ids: return list(paginator.paginate( InstanceIds=ids, ).search('Reservations[].Instances[]')) elif filters is None: module.fail_json(msg="No filters provided when they were required") elif filters is not None: for key in filters.keys(): if not key.startswith("tag:"): filters[key.replace("_", "-")] = filters.pop(key) return list(paginator.paginate( Filters=ansible_dict_to_boto3_filter_list(filters) ).search('Reservations[].Instances[]')) return [] @AWSRetry.jittered_backoff() def get_default_vpc(ec2): vpcs = ec2.describe_vpcs(Filters=ansible_dict_to_boto3_filter_list({'isDefault': 'true'})) if len(vpcs.get('Vpcs', [])): return vpcs.get('Vpcs')[0] return None @AWSRetry.jittered_backoff() def get_default_subnet(ec2, vpc, availability_zone=None): subnets = ec2.describe_subnets( Filters=ansible_dict_to_boto3_filter_list({ 'vpc-id': vpc['VpcId'], 'state': 'available', 'default-for-az': 'true', }) ) if len(subnets.get('Subnets', [])): if availability_zone is not None: subs_by_az = dict((subnet['AvailabilityZone'], subnet) for subnet in subnets.get('Subnets')) if availability_zone in subs_by_az: return subs_by_az[availability_zone] # to have a deterministic sorting order, we sort by AZ so we'll always pick the `a` subnet first # there can only be one default-for-az subnet per AZ, so the AZ key is always unique in this list by_az = sorted(subnets.get('Subnets'), key=lambda s: s['AvailabilityZone']) return by_az[0] return None def ensure_instance_state(state, ec2=None): if ec2 is None: module.client('ec2') if state in ('running', 'started'): 
        changed, failed, instances = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING')
        if failed:
            module.fail_json(
                msg="Unable to start instances",
                reboot_success=list(changed),
                reboot_failed=failed)
        module.exit_json(
            msg='Instances started',
            reboot_success=list(changed),
            changed=bool(len(changed)),
            reboot_failed=[],
            instances=[pretty_instance(i) for i in instances],
        )
    elif state in ('restarted', 'rebooted'):
        # Restart = stop then start; each transition waits via await_instances.
        changed, failed, instances = change_instance_state(
            filters=module.params.get('filters'),
            desired_state='STOPPED')
        changed, failed, instances = change_instance_state(
            filters=module.params.get('filters'),
            desired_state='RUNNING')
        if failed:
            module.fail_json(
                msg="Unable to restart instances",
                reboot_success=list(changed),
                reboot_failed=failed)
        module.exit_json(
            msg='Instances restarted',
            reboot_success=list(changed),
            changed=bool(len(changed)),
            reboot_failed=[],
            instances=[pretty_instance(i) for i in instances],
        )
    elif state in ('stopped',):
        changed, failed, instances = change_instance_state(
            filters=module.params.get('filters'),
            desired_state='STOPPED')
        if failed:
            module.fail_json(
                msg="Unable to stop instances",
                stop_success=list(changed),
                stop_failed=failed)
        module.exit_json(
            msg='Instances stopped',
            stop_success=list(changed),
            changed=bool(len(changed)),
            stop_failed=[],
            instances=[pretty_instance(i) for i in instances],
        )
    elif state in ('absent', 'terminated'):
        terminated, terminate_failed, instances = change_instance_state(
            filters=module.params.get('filters'),
            desired_state='TERMINATED')
        if terminate_failed:
            module.fail_json(
                msg="Unable to terminate instances",
                terminate_success=list(terminated),
                terminate_failed=terminate_failed)
        module.exit_json(
            msg='Instances terminated',
            terminate_success=list(terminated),
            changed=bool(len(terminated)),
            terminate_failed=[],
            instances=[pretty_instance(i) for i in instances],
        )


@AWSRetry.jittered_backoff()
def change_instance_state(filters, desired_state, ec2=None):
    """Takes 
STOPPED/RUNNING/TERMINATED""" if ec2 is None: ec2 = module.client('ec2') changed = set() instances = find_instances(ec2, filters=filters) to_change = set(i['InstanceId'] for i in instances) unchanged = set() for inst in instances: try: if desired_state == 'TERMINATED': # TODO use a client-token to prevent double-sends of these start/stop/terminate commands # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html resp = ec2.terminate_instances(InstanceIds=[inst['InstanceId']]) [changed.add(i['InstanceId']) for i in resp['TerminatingInstances']] if desired_state == 'STOPPED': if inst['State']['Name'] == 'stopping': unchanged.add(inst['InstanceId']) continue resp = ec2.stop_instances(InstanceIds=[inst['InstanceId']]) [changed.add(i['InstanceId']) for i in resp['StoppingInstances']] if desired_state == 'RUNNING': resp = ec2.start_instances(InstanceIds=[inst['InstanceId']]) [changed.add(i['InstanceId']) for i in resp['StartingInstances']] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): # we don't care about exceptions here, as we'll fail out if any instances failed to terminate pass if changed: await_instances(ids=list(changed) + list(unchanged), state=desired_state) change_failed = list(to_change - changed) instances = find_instances(ec2, ids=list(to_change)) return changed, change_failed, instances def pretty_instance(i): instance = camel_dict_to_snake_dict(i, ignore_list=['Tags']) instance['tags'] = boto3_tag_list_to_ansible_dict(i['Tags']) return instance def determine_iam_role(name_or_arn): if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn): return name_or_arn iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) try: role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True) return role['InstanceProfile']['Arn'] except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == 'NoSuchEntity': module.fail_json_aws(e, msg="Could 
not find instance_role {0}".format(name_or_arn)) module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn)) def handle_existing(existing_matches, changed, ec2, state): if state in ('running', 'started') and [i for i in existing_matches if i['State']['Name'] != 'running']: ins_changed, failed, instances = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING') module.exit_json( changed=bool(len(ins_changed)) or changed, instances=[pretty_instance(i) for i in instances], instance_ids=[i['InstanceId'] for i in instances], ) changes = diff_instance_and_params(existing_matches[0], module.params) for c in changes: ec2.modify_instance_attribute(**c) changed |= bool(changes) changed |= add_or_update_instance_profile(existing_matches[0], module.params.get('instance_role')) changed |= change_network_attachments(existing_matches[0], module.params, ec2) altered = find_instances(ec2, ids=[i['InstanceId'] for i in existing_matches]) module.exit_json( changed=bool(len(changes)) or changed, instances=[pretty_instance(i) for i in altered], instance_ids=[i['InstanceId'] for i in altered], changes=changes, ) def ensure_present(existing_matches, changed, ec2, state): if len(existing_matches): try: handle_existing(existing_matches, changed, ec2, state) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws( e, msg="Failed to handle existing instances {0}".format(', '.join([i['InstanceId'] for i in existing_matches])), # instances=[pretty_instance(i) for i in existing_matches], # instance_ids=[i['InstanceId'] for i in existing_matches], ) try: instance_spec = build_run_instance_spec(module.params) instance_response = run_instances(ec2, **instance_spec) instances = instance_response['Instances'] instance_ids = [i['InstanceId'] for i in instances] for ins in instances: changes = diff_instance_and_params(ins, module.params, 
skip=['UserData', 'EbsOptimized']) for c in changes: try: AWSRetry.jittered_backoff()(ec2.modify_instance_attribute)(**c) except botocore.exceptions.ClientError as e: module.fail_json_aws(e, msg="Could not apply change {0} to new instance.".format(str(c))) await_instances(instance_ids) instances = ec2.get_paginator('describe_instances').paginate( InstanceIds=instance_ids ).search('Reservations[].Instances[]') module.exit_json( changed=True, instances=[pretty_instance(i) for i in instances], instance_ids=instance_ids, spec=instance_spec, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to create new EC2 instance") @AWSRetry.jittered_backoff() def run_instances(ec2, **instance_spec): try: return ec2.run_instances(**instance_spec) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == 'InvalidParameterValue' and "Invalid IAM Instance Profile ARN" in e.response['Error']['Message']: # If the instance profile has just been created, it takes some time to be visible by ec2 # So we wait 10 second and retry the run_instances time.sleep(10) return ec2.run_instances(**instance_spec) else: raise e def main(): global module argument_spec = ec2_argument_spec() argument_spec.update(dict( state=dict(default='present', choices=['present', 'started', 'running', 'stopped', 'restarted', 'rebooted', 'terminated', 'absent']), wait=dict(default=True, type='bool'), wait_timeout=dict(default=600, type='int'), # count=dict(default=1, type='int'), image=dict(type='dict'), image_id=dict(type='str'), instance_type=dict(default='t2.micro', type='str'), user_data=dict(type='str'), tower_callback=dict(type='dict'), ebs_optimized=dict(type='bool'), vpc_subnet_id=dict(type='str', aliases=['subnet_id']), availability_zone=dict(type='str'), security_groups=dict(default=[], type='list'), security_group=dict(type='str'), instance_role=dict(type='str'), name=dict(type='str'), tags=dict(type='dict'), 
purge_tags=dict(type='bool', default=False), filters=dict(type='dict', default=None), launch_template=dict(type='dict'), key_name=dict(type='str'), cpu_credit_specification=dict(type='str', choices=['standard', 'unlimited']), cpu_options=dict(type='dict', options=dict( core_count=dict(type='int', required=True), threads_per_core=dict(type='int', choices=[1, 2], required=True) )), tenancy=dict(type='str', choices=['dedicated', 'default']), instance_initiated_shutdown_behavior=dict(type='str', choices=['stop', 'terminate']), termination_protection=dict(type='bool'), detailed_monitoring=dict(type='bool'), instance_ids=dict(default=[], type='list'), network=dict(default=None, type='dict'), volumes=dict(default=None, type='list'), )) # running/present are synonyms # as are terminated/absent module = AnsibleAWSModule( argument_spec=argument_spec, mutually_exclusive=[ ['security_groups', 'security_group'], ['availability_zone', 'vpc_subnet_id'], ['tower_callback', 'user_data'], ['image_id', 'image'], ], supports_check_mode=True ) if module.params.get('network'): if 'ebs_optimized' in module.params['network']: module.deprecate("network.ebs_optimized is deprecated." 
"Use the top level ebs_optimized parameter instead", 2.9) if module.params.get('network').get('interfaces'): if module.params.get('security_group'): module.fail_json(msg="Parameter network.interfaces can't be used with security_group") if module.params.get('security_groups'): module.fail_json(msg="Parameter network.interfaces can't be used with security_groups") state = module.params.get('state') ec2 = module.client('ec2') if module.params.get('filters') is None: filters = { # all states except shutting-down and terminated 'instance-state-name': ['pending', 'running', 'stopping', 'stopped'] } if state == 'stopped': # only need to change instances that aren't already stopped filters['instance-state-name'] = ['stopping', 'pending', 'running'] if isinstance(module.params.get('instance_ids'), string_types): filters['instance-id'] = [module.params.get('instance_ids')] elif isinstance(module.params.get('instance_ids'), list) and len(module.params.get('instance_ids')): filters['instance-id'] = module.params.get('instance_ids') else: if not module.params.get('vpc_subnet_id'): if module.params.get('network'): # grab AZ from one of the ENIs ints = module.params.get('network').get('interfaces') if ints: filters['network-interface.network-interface-id'] = [] for i in ints: if
neighbors {neighbor}', 'show ip bgp {address_family} all neighbors', 'show ip bgp {address_family} all neighbors {neighbor}', ] exclude = ['current_time', 'last_read', 'last_write', 'up_time', 'ackhold' , 'retrans', 'keepalives', 'total', 'total_data', 'value', 'with_data', 'delrcvwnd', 'rcvnxt', 'rcvwnd', 'receive_idletime' , 'sent_idletime', 'sndnxt', 'snduna', 'uptime'] def cli(self, neighbor='', address_family='', output=None): # Restricted address families restricted_list = ['ipv4 unicast', 'ipv6 unicast'] # Init vars ret_dict = {} if output is None: # Select the command if address_family and neighbor: if address_family not in restricted_list: cmd = self.cli_command[3].format(address_family=address_family, neighbor=neighbor) else: return ret_dict elif address_family: if address_family not in restricted_list: cmd = self.cli_command[2].format(address_family=address_family) else: return ret_dict elif neighbor: cmd = self.cli_command[1].format(neighbor=neighbor) else: cmd = self.cli_command[0] # Execute command show_output = self.device.execute(cmd) else: show_output = output # Call super return super().cli(output=show_output, neighbor=neighbor, address_family=address_family) # =================================================================== # Parser for: # * 'show ip bgp neighbors' # * 'show ip bgp neighbors {neighbor}' # * 'show ip bgp {address_family} neighbors' # * 'show ip bgp {address_family} neighbors {neighbor}' # * 'show ip bgp {address_family} vrf {vrf} neighbors' # * 'show ip bgp {address_family} vrf {vrf} neighbors {neighbor}' # =================================================================== class ShowIpBgpNeighbors(ShowBgpNeighborSuperParser, ShowBgpAllNeighborsSchema): ''' Parser for: * 'show ip bgp neighbors' * 'show ip bgp neighbors {neighbor}' * 'show ip bgp {address_family} neighbors' * 'show ip bgp {address_family} neighbors {neighbor}' * 'show ip bgp {address_family} vrf {vrf} neighbors' * 'show ip bgp {address_family} vrf {vrf} 
neighbors {neighbor}' ''' cli_command = ['show ip bgp {address_family} vrf {vrf} neighbors {neighbor}', 'show ip bgp {address_family} vrf {vrf} neighbors', 'show ip bgp {address_family} neighbors {neighbor}', 'show ip bgp {address_family} neighbors', 'show ip bgp neighbors {neighbor}', 'show ip bgp neighbors', ] excude = ['current_time' , 'last_read' , 'last_write', 'up_time', 'ackhold', 'retrans', 'keepalives', 'total', 'total_data' , 'value', 'with_data', 'delrcvwnd', 'rcvnxt', 'rcvwnd' 'receive_idletime', 'sent_idletime', 'sndnxt', 'snduna', 'uptime'] def cli(self, neighbor='', address_family='', vrf='', output=None): # Restricted address families restricted_list = ['ipv4 unicast', 'ipv6 unicast', 'link-state link-state'] # Init vars ret_dict = {} if output is None: # Select the command if address_family and vrf and neighbor: if address_family not in restricted_list: cmd = self.cli_command[0].\ format(address_family=address_family, vrf=vrf, neighbor=neighbor) else: return ret_dict elif address_family and vrf: if address_family not in restricted_list: cmd = self.cli_command[1].\ format(address_family=address_family, vrf=vrf) else: return ret_dict elif address_family and neighbor: if address_family in restricted_list: cmd = self.cli_command[2].format(address_family=address_family, neighbor=neighbor) else: return ret_dict elif address_family: if address_family in restricted_list: cmd = self.cli_command[3].format(address_family=address_family) else: return ret_dict elif neighbor: cmd = self.cli_command[4].format(neighbor=neighbor) else: cmd = self.cli_command[5] # Execute command show_output = self.device.execute(cmd) else: show_output = output # Call super return super().cli(output=show_output, neighbor=neighbor, vrf=vrf, address_family=address_family) #------------------------------------------------------------------------------- # ============================================================================== # Schema for: # * 'show bgp all neighbors {neighbor} 
advertised-routes' # * 'show bgp {address_family} all neighbors {neighbor} advertised-routes' # * 'show bgp neighbors {neighbor} advertised-routes' # * 'show bgp {address_family} neighbors {neighbor} advertised-routes' # * 'show ip bgp all neighbors {neighbor} advertised-routes' # * 'show ip bgp {address_family} all neighbors {neighbor} advertised-routes' # * 'show ip bgp neighbors {neighbor} advertised-routes' # * 'show ip bgp {address_family} neighbors {neighbor} advertised-routes' # ============================================================================== class ShowBgpNeighborsAdvertisedRoutesSchema(MetaParser): ''' Schema for: * 'show bgp all neighbors {neighbor} advertised-routes' * 'show bgp {address_family} all neighbors {neighbor} advertised-routes' * 'show bgp neighbors {neighbor} advertised-routes' * 'show bgp {address_family} neighbors {neighbor} advertised-routes' * 'show ip bgp all neighbors {neighbor} advertised-routes' * 'show ip bgp {address_family} all neighbors {neighbor} advertised-routes' * 'show ip bgp neighbors {neighbor} advertised-routes' * 'show ip bgp {address_family} neighbors {neighbor} advertised-routes' ''' schema = { 'vrf': {Any(): {'neighbor': {Any(): {'address_family': {Any(): {Optional('bgp_table_version'): int, Optional('local_router_id'): str, Optional('route_distinguisher'): str, Optional('default_vrf'): str, Optional('advertised'): {Optional(Any()): {Optional('index'): {Optional(Any()): {Optional('next_hop'): str, Optional('status_codes'): str, Optional('path_type'): str, Optional('metric'): int, Optional('localprf'): int, Optional('weight'): int, Optional('path'): str, Optional('origin_codes'): str, }, }, }, }, }, }, }, }, }, }, } # ============================================================================== # Super Parser for: # * 'show bgp all neighbors {neighbor} advertised-routes' # * 'show bgp {address_family} all neighbors {neighbor} advertised-routes' # * 'show bgp neighbors {neighbor} advertised-routes' # * 
'show bgp {address_family} neighbors {neighbor} advertised-routes' # * 'show ip bgp all neighbors {neighbor} advertised-routes' # * 'show ip bgp {address_family} all neighbors {neighbor} advertised-routes' # * 'show ip bgp neighbors {neighbor} advertised-routes' # * 'show ip bgp {address_family} neighbors {neighbor} advertised-routes' # * 'show ip bgp {address_family} rd {rd} neighbors {neighbor} advertised-routes' # * 'show ip bgp {address_family} vrf {vrf} neighbors {neighbor} advertised-routes' # ============================================================================== class ShowBgpNeighborsAdvertisedRoutesSuperParser(ShowBgpNeighborsAdvertisedRoutesSchema): ''' Parser for: * 'show bgp all neighbors {neighbor} advertised-routes' * 'show bgp {address_family} all neighbors {neighbor} advertised-routes' * 'show bgp neighbors {neighbor} advertised-routes' * 'show bgp {address_family} neighbors {neighbor} advertised-routes' * 'show ip bgp all neighbors {neighbor} advertised-routes' * 'show ip bgp {address_family} all neighbors {neighbor} advertised-routes' * 'show ip bgp neighbors {neighbor} advertised-routes' * 'show ip bgp {address_family} neighbors {neighbor} advertised-routes' * 'show ip bgp {address_family} rd {rd} neighbors {neighbor} advertised-routes' * 'show ip bgp {address_family} vrf {vrf} neighbors {neighbor} advertised-routes' ''' def cli(self, neighbor, address_family='', output=None): p = re.compile(r'^BGP +neighbor +is +(?P<bgp_neighbor>[0-9A-Z\:\.]+)' '(, +vrf +(?P<vrf>[0-9A-Za-z]+))?, +remote AS ' '+(?P<remote_as_id>[0-9]+), ' '+(?P<internal_external_link>[a-z\s]+)$') p1 = re.compile(r'^\s*For +address +family:' ' +(?P<address_family>[a-zA-Z0-9\s\-\_]+)$') p3_1 = re.compile(r'^\s*(?P<status_codes>(s|x|S|d|h|\*|\>|\s)+)?' '(?P<path_type>(i|e|c|l|a|r|I))?' 
'(?P<prefix>[a-zA-Z0-9\.\:\/\[\]\,]+)' '(?: *(?P<next_hop>[a-zA-Z0-9\.\:\/\[\]\,]+))?$') p3_2 = re.compile(r'^\s*(?P<status_codes>(s|x|S|d|b|h|\*|\>|\s)+)' '(?P<path_type>(i|e|c|l|a|r|I))?(\s)?' '(?P<prefix>(([0-9]+[\.][0-9]+[\.][0-9]+' '[\.][0-9]+[\/]?[0-9]*)|([a-zA-Z0-9]+[\:]' '[a-zA-Z0-9]+[\:][a-zA-Z0-9]+[\:]' '[a-zA-Z0-9]+[\:][\:][\/][0-9]+)|' '([a-zA-Z0-9]+[\:][a-zA-Z0-9]+[\:]' '[a-zA-Z0-9]+[\:][\:][\/][0-9]+)))' ' +(?P<next_hop>[a-zA-Z0-9\.\:]+)' ' +(?P<numbers>[a-zA-Z0-9\s\(\)\{\}]+)' ' +(?P<origin_codes>(i|e|\?|\&|\|))$') p3_3 = re.compile(r'^\s*(?P<status_codes>(s|x|S|d|h|\*|\>|\s)+)?' '(?P<path_type>(i|e|c|l|a|r|I))?' ' +(?P<next_hop>(([0-9]+[\.][0-9]+[\.][0-9]' '+[\.][0-9]+)|([a-zA-Z0-9]+[\:][a-zA-Z0-9]+' '[\:][a-zA-Z0-9]+[\:][a-zA-Z0-9]+[\:]' '[a-zA-Z0-9]+[\:][\:][a-zA-Z0-9])|' '([a-zA-Z0-9]+[\:][a-zA-Z0-9]+[\:][a-zA-Z0-9]+' '[\:][a-zA-Z0-9]+[\:][\:][a-zA-Z0-9])))?' '(?: +(?P<numbers>[a-zA-Z0-9\s\(\)\{\}]+))?' ' +(?P<origin_codes>(i|e|\?|\|))$') p4 = re.compile(r'^\s*Route +Distinguisher *: ' '+(?P<route_distinguisher>(\S+))' '( +\(default for vrf +(?P<default_vrf>(\S+))\))?' 
'( +VRF Router ID (?P<vrf_router_id>(\S+)))?$') # Get VRF name by executing 'show bgp all neighbors | i BGP neighbor' out_vrf = self.device.execute('show bgp all neighbors | i BGP neighbor') vrf = 'default' for line in out_vrf.splitlines(): line = line.strip() # BGP neighbor is 10.16.2.2, remote AS 100, internal link m = p.match(line) if m: if m.groupdict()['bgp_neighbor'] == neighbor: if m.groupdict()['vrf']: vrf = str(m.groupdict()['vrf']) break else: continue # Init vars route_dict = {} af_dict = {} data_on_nextline = False index = 1 bgp_table_version = local_router_id = '' neighbor_id = neighbor if address_family: original_address_family = address_family # For address family: IPv4 Unicast # BGP table version is 25, Local Router ID is 10.186.101.1 p2 = re.compile(r'^\s*BGP +table +version +is' ' +(?P<bgp_table_version>[0-9]+), +[Ll]ocal +[Rr]outer' ' +ID +is +(?P<local_router_id>(\S+))$') # Status: s-suppressed, x-deleted, S-stale, d-dampened, h-history, *-valid, >-best # Path type: i-internal, e-external, c-confed, l-local, a-aggregate, r-redist, I-injected # Origin codes: i - IGP, e - EGP, ? - incomplete, | - multipath, & - backup # *>i[2]:[77][7,0][10.69.9.9,1,151587081][10.135.1.1,22][10.106.101.1,10.76.1.30]/616 # *>iaaaa:1::/113 ::ffff:10.106.101.1 # *> 646:22:22::/64 2001:DB8:20:4:6::6 # Network Next Hop Metric LocPrf Weight Path # *>i 10.1.2.0/24 10.4.1.1 2219 100 0 200 33299 51178 47751 {27016} e # *>l10.4.1.0/24 0.0.0.0 100 32768 i # *>r10.16.1.0/24 0.0.0.0 4444 100 32768 ? # *>r10.16.1.0 0.0.0.0 4444 100 32768 ? # *>r10.16.2.0/24 0.0.0.0 4444 100 32768 ? # *>i10.49.0.0/16 10.106.101.1 100 0 10 20 30 40 50 60 70 80 90 i # *>i10.4.2.0/24 10.106.102.4 100 0 {62112 33492 4872 41787 13166 50081 21461 58376 29755 1135} i # *>i 172.16.51.0/24 192.168.36.220 0 100 0 ? 
p3_2 = re.compile(r'^\s*(?P<status_codes>(s|x|S|d|b|h|\*|\>|\s)+)' '(?P<path_type>(i|e|c|l|a|r|I))?(\s+)?(?P<prefix>\S+) +(?P<next_hop>' '[a-zA-Z0-9\.\:]+) +(?P<numbers>[a-zA-Z0-9\s\(\)\{\}]+) +' '(?P<origin_codes>(i|e|\?|\&|\|))$') # 0.0.0.0 100 32768 i # 10.106.101.1 4444 100 0 3 10 20 30 40 50 60 70 80 90 i #*>i 10.4.1.1 2219 100 0 200 33299 51178 47751 {27016} e # 2219 0 400 33299 51178 47751 {27016} e # Route Distinguisher: 200:1 # Route Distinguisher: 300:1 (default for vrf VRF1) VRF Router ID 10.94.44.44 for line in output.splitlines(): line = line.rstrip() # For address family: IPv4 Unicast m = p1.match(line) if m: address_family = m.groupdict()['address_family'].lower() original_address_family = address_family continue # BGP table version is 25, Local Router ID is 10.186.101.1 m = p2.match(line) if m: bgp_table_version = int(m.groupdict()['bgp_table_version']) local_router_id = str(m.groupdict()['local_router_id']) # Init dict if 'vrf' not in route_dict: route_dict['vrf'] = {} if vrf not in route_dict['vrf']: route_dict['vrf'][vrf] = {} if 'neighbor' not in route_dict['vrf'][vrf]: route_dict['vrf'][vrf]['neighbor'] = {} if neighbor_id not in route_dict['vrf'][vrf]['neighbor']: route_dict['vrf'][vrf]['neighbor'][neighbor_id] = {} if 'address_family' not in route_dict['vrf'][vrf]['neighbor']\ [neighbor_id]: route_dict['vrf'][vrf]['neighbor'][neighbor_id]\ ['address_family'] = {} if address_family not in route_dict['vrf'][vrf]['neighbor']\ [neighbor_id]['address_family']: route_dict['vrf'][vrf]['neighbor'][neighbor_id]\ ['address_family'][address_family] = {} # Set af_dict af_dict = route_dict['vrf'][vrf]['neighbor'][neighbor_id]\ ['address_family'][address_family] # Init advertised dict if 'advertised' not in af_dict: af_dict['advertised'] = {} route_dict['vrf'][vrf]['neighbor'][neighbor_id]\ ['address_family'][address_family]['bgp_table_version'] = \ bgp_table_version route_dict['vrf'][vrf]['neighbor'][neighbor_id]\ 
['address_family'][address_family]['local_router_id'] = \
""" =========== classify.py =========== This module contains functionality for classifying nanopore captures. """ import os import re from abc import ABC, abstractmethod from dataclasses import dataclass from pathlib import PosixPath from typing import * # I know people don't like import *, but I think it has benefits for types (doesn't impede people from being generous with typing) import numpy as np import torch import torch.nn as nn from ..logger import Logger, getLogger from ..signals import Capture, FractionalizedSignal, RawSignal # TODO: Pipe through filtering https://github.com/uwmisl/poretitioner/issues/43 https://github.com/uwmisl/poretitioner/issues/68 # from .models import NTERs_trained_cnn_05152019 as pretrained_model from . import filtering from .configuration import ClassifierConfiguration from .core import NumpyArrayLike, PathLikeOrString, ReadId use_cuda = False # True # TODO : Don't hardcode use of CUDA : https://github.com/uwmisl/poretitioner/issues/41 ClassLabel = NewType("ClassLabel", str) # Maps a numpy array like (some vector encoding that represents a label) to a the label string. LabelForResult = Callable[[NumpyArrayLike], ClassLabel] __all__ = [ "predict_class", "ClassificationRunId", "ClassifierDetails", "ClassifierPlugin", "CLASSIFICATION_PATH", "ClassificationResult", "PytorchClassifierPlugin", ] # Uniquely identifies a classification run that happened (e.g. 'NTER_2018_RandomForest_Attempt_3'). ClassificationRunId = NewType("ClassificationRunId", str) @dataclass(frozen=True) class ClassifierDetails: model: str model_version: str classification_threshold: float # Timestamp of when this classification occurred, in seconds from epoch (as a float). # # Q: Why not date-time? # # A: Sadly, as of 2020, h5py doesn't provide a good way of storing dates [1]. # Doing so would also be less precise than storing epoch time. # # Q: Why seconds (knowing it will be fractionalized)? 
# # A: On most modern machines, python time.time() provides micro-second precision. # But this can't be guaranteed (on older machines, it might only provide second precision) [1]. # # If we really wanted an int, the alternative to guarantee an int would be to store # the timestamp in nanoseconds [3], but that feels verbose to me. # # [1] - https://stackoverflow.com/questions/23570632/store-datetimes-in-hdf5-with-h5py # [2] - https://docs.python.org/3/library/time.html#time.time # [3] - https://docs.python.org/3/library/time.html#time.time_ns timestamp_ms: float model_file: PathLikeOrString @dataclass(frozen=True) class CLASSIFICATION_PATH: ROOT = f"/Classification/" @classmethod def for_classification_run(cls, classification_run: ClassificationRunId) -> str: path = str(PosixPath(CLASSIFICATION_PATH.ROOT, classification_run)) return path @classmethod def pass_path(cls, classification_run: ClassificationRunId) -> str: """Path to the group that contains the readIds that passed classification during this classification run. Parameters ---------- classification_run : ClassificationRunId A unique identifier for the classification run that generated these results (e.g. "my_classication_run_04"). Returns ------- str Pathlike to path. (e.g. /Classifcation/my_classication_run_04/pass) """ CLASSICATION_RUN_PATH = cls.for_classification_run(classification_run) path = str(PosixPath(CLASSICATION_RUN_PATH, "pass")) return path @classmethod def fail_path(cls, classification_run: ClassificationRunId) -> str: """Path to the group that contains the readIds that failed classification during this classification run. Parameters ---------- classification_run : ClassificationRunId A unique identifier for the classification run that generated these results (e.g. "my_classication_run_04"). Returns ------- str Pathlike to path. (e.g. 
/Classifcation/my_classication_run_04/fail) """ CLASSICATION_RUN_PATH = cls.for_classification_run(classification_run) path = str(PosixPath(CLASSICATION_RUN_PATH, "fail")) return path def read_id_path(cls, classification_run: ClassificationRunId, read_id: ReadId) -> str: """Path to the group that contains the classification results for a given readId. Parameters ---------- classification_run : ClassificationRunId A unique identifier for the classification run that generated these results (e.g. "my_classication_run_04"). read_id : ReadId The readId of the read we want to know the classification results for. Returns ------- str Path to the group that contains the classification results for a given readId. """ CLASSICATION_RUN_PATH = cls.for_classification_run(classification_run) path = str(PosixPath(CLASSICATION_RUN_PATH, f"{read_id}")) return path @dataclass(frozen=True) class ClassificationResult: """The result of passing the capture data to the classifier. Fields ---------- score : float A value representing the 'score' of a label predicted by the classifier. Abstractly, the score is a measure of confidence that this label is correct, as determined by the score being greater than some threshold. What exact values this score can take on depends on your classifier (e.g. if you pass the final result through a soft-max, this score will represent a probability from 0 to 1.0). label : ClassLabel The label assigned to this prediction. Returns ------- [ClassificationResult] ClassificationResult instance. 
""" label: ClassLabel score: float # TODO: Finish writing Classifier plugin architecture: https://github.com/uwmisl/poretitioner/issues/91 class ClassifierPlugin(ABC): @abstractmethod def model_name(self) -> str: raise NotImplementedError("model_name hasn't been implemented for this classifier.") @abstractmethod def model_version(self) -> str: raise NotImplementedError("model_version hasn't been implemented for this classifier.") @abstractmethod def model_file(self) -> str: raise NotImplementedError("model_file hasn't been implemented for this classifier.") @abstractmethod def load(self, use_cuda: bool = False): """Loads a model for classification. This method is where you should do any pre processing needed. For exammple, loading and configuring a Pytorch model, or a sci-kit learn model. Parameters ---------- use_cuda : bool Whether to use cuda. Raises ------ NotImplementedError If this method hasn't been implemented. """ raise NotImplementedError("load hasn't been implemented for this classifier.") @abstractmethod def evaluate(self, capture) -> ClassificationResult: raise NotImplementedError("Evaluate hasn't been implemented for this classifier.") # TODO: Implement Classification with the new data model: https://github.com/uwmisl/poretitioner/issues/92 def filter_and_classify( config, capture_filepaths: List[PathLikeOrString], overwrite=False, filter_name=None ): local_logger = logger.getLogger() clf_config = config["classify"] classifier_name = clf_config["classifier"] classification_path = clf_config["classification_path"] # Load classifier local_logger.info(f"Loading classifier {classifier_name}.") assert classifier_name in ["NTER_cnn", "NTER_rf"] assert classification_path is not None and len(classification_path) > 0 classifier = init_classifier(classifier_name, classification_path) # Filter (optional) TODO: Restore filtering https://github.com/uwmisl/poretitioner/issues/43 https://github.com/uwmisl/poretitioner/issues/68 read_path = "/" # if filter_name is 
not None: # local_logger.info("Beginning filtering.") # filter.filter_and_store_result(config, fast5_fnames, filter_name, overwrite=overwrite) # read_path = f"/Filter/{filter_name}/pass" # else: # read_path = "/" # Classify classify_fast5_file(f5, clf_config, classifier, classifier_name, read_path) # def classify_file( # capturef5: ClassifierFile, configuration: ClassifierConfiguration, classifier: Classifier, classifier_run_name, read_path, class_labels=None): # for read in capturef5.reads: # pass # TODO: Implement Classification with the new data model: https://github.com/uwmisl/poretitioner/issues/92 def classify_fast5_file( capture_filepath: PathLikeOrString, clf_config, classifier, classifier_run_name, read_path, class_labels=None, ): local_logger = logger.getLogger() local_logger.debug(f"Beginning classification for file {capture_filepath}.") classifier_name = clf_config["classifier"] classifier_version = clf_config["version"] classifier_location = clf_config["filepath"] classify_start = clf_config["start_obs"] # 100 in NTER paper classify_end = clf_config["end_obs"] # 21000 in NTER paper classifier_confidence_threshold = clf_config["min_confidence"] configuration = ClassifierConfiguration( classifier_name, classifier_version, classify_start, classify_end, classifier_confidence_threshold, ) # details = ClassifierDetails(classifier_name, , , ) # ClassifierFile(filepath, ) details = None # ClassifierDetails(classifier_name, ) assert classify_start >= 0 and classify_end >= 0 assert classifier_confidence_threshold is None or (0 <= classifier_confidence_threshold <= 1) local_logger.debug( f"Classification parameters: name: {classifier_name}, " f"range of data points: ({classify_start}, {classify_end})" f"confidence required to pass: {classifier_confidence_threshold}" ) results_path = f"/Classification/{classifier_run_name}" write_classifier_details(f5, clf_config, results_path) with ClassifierFile(capture_filepath, "r+") as classifier_f5: details = 
ClassifierDetails( classifier_name, classifier_version, classifier_location, classifier_confidence_threshold, ) classifier_f5.write_details(details) for read in classifier_f5.reads: signal = classifier_f5.get_fractionalized_read( read, start=classify_start, end=classify_end ) labels, probability = predict_class( classifier_name, classifier, signal, class_labels=class_labels ) if classifier_confidence_threshold is not None: passed_classification = probability > classifier_confidence_threshold else: passed_classification = None write_classifier_result() # read_h5group_names = f5.get(read_path) # for grp in read_h5group_names: # if "read" not in grp: # continue # read_id = re.findall(r"read_(.*)", str(grp))[0] # signal = get_fractional_blockage_for_read( # f5, grp, start=classify_start, end=classify_end # ) # y, p = predict_class(classifier_name, classifier, signal, class_labels=class_labels) # if classifier_confidence_threshold is not None: # passed_classification = False if p <= classifier_confidence_threshold else True # else: # passed_classification = None # write_classifier_result(f5, results_path, read_id, y, p, passed_classification) # TODO: Implement Classification with the new data model: https://github.com/uwmisl/poretitioner/issues/92 # TODO: This classifier initialization should be a special case of a Plugin: https://github.com/uwmisl/poretitioner/issues/91 def init_classifier(classifier_name, classification_path): """Initialize the classification model. Supported classifier names include "NTER_cnn" and "NTER_rf". According to documentation for original NTER code: Prediction classes are 1-9: 0:Y00, 1:Y01, 2:Y02, 3:Y03, 4:Y04, 5:Y05, 6:Y06, 7:Y07, 8:Y08, 9:noise, -1:below conf_thesh Parameters ---------- classifier_name : str The name of any supported classifier, currently "NTER_cnn" and "NTER_rf". classification_path : str Location of the pre-trained model file. Returns ------- model Classification model (type depends on the spceified model). 
Raises ------ ValueError Raised if the classifier name is not supported. OSError Raised if the classifier path does not exist. """ if classifier_name == "NTER_cnn": # CNN classifier if not os.path.exists(classification_path): raise OSError(f"Classifier path doesn't exist: {classification_path}") nanoporeTER_cnn = pretrained_model.load_cnn(classification_path) return nanoporeTER_cnn elif classifier_name == "NTER_rf": # Random forest classifier if not os.path.exists(classification_path): raise OSError(f"Classifier path doesn't exist: {classification_path}") # TODO : Improve model maintainability : https://github.com/uwmisl/poretitioner/issues/38 # return joblib.load(open(classification_path, "rb")) pass else: raise ValueError(f"Invalid classifier name: {classifier_name}") # TODO: Implement Classification with the new data model: https://github.com/uwmisl/poretitioner/issues/92 def predict_class(classifier_name, classifier, raw, class_labels=None) -> ClassificationResult: """Runs the classifier using the given raw data as input. Does not apply any kind of confidence threshold. Parameters ---------- classifier_name :
getCurrentSBML(aHandle = None): if aHandle is None: aHandle = gHandle return rrLib.getCurrentSBML(aHandle) ##\brief Retrieve the last SBML model that was loaded #\return Returns False if it fails or no model is loaded, otherwise returns the SBML string def getSBML(aHandle = None): if aHandle is None: aHandle = gHandle return rrLib.getSBML(aHandle) ##@} ##\ingroup parameters #@{ ##\brief Promote any local parameters to global status #\param sArg The string containing SBML model to promote #\return Returns False if it fails, otherwise it returns the promoted SBML string def getParamPromotedSBML(sArg, aHandle = None): if aHandle is None: aHandle = gHandle value = c_char(sArg) if rrLib.getParamPromotedSBML(aHandle, pointer(value)) == True: return value.value else: raise RuntimeError('Index out of range') ##@} ##\ingroup simulation #@{ ##\brief Sets simulator capabilities #\param[out] caps An XML string that specifies the simulators capabilities #\return Returns true if successful def setCapabilities(caps): return rrLib.setCapabilities(gHandle, caps) ##\brief Returns simulator capabilities #\return Returns False if it fails, otherwise returns the simulator's capabilities in the form of an XML string def getCapabilities(): return rrLib.getCapabilities(gHandle) ##\brief Sets the start time for the simulation #\param timeStart #\return Returns True if successful def setTimeStart(timeStart, rrHandle = None): if rrHandle is None: rrHandle = gHandle return rrLib.setTimeStart (rrHandle, c_double(timeStart)) ##\brief Sets the end time for the simulation #\param timeEnd #\return Returns True if successful def setTimeEnd(timeEnd, rrHandle = None): if rrHandle is None: rrHandle = gHandle return rrLib.setTimeEnd(rrHandle, c_double(timeEnd)) ##\brief Set the number of points to generate in a simulation #\param numPoints Number of points to generate #\return Returns True if successful def setNumPoints(numPoints, rrHandle = None): if rrHandle is None: rrHandle = gHandle return 
rrLib.setNumPoints(rrHandle, numPoints) ##\brief Sets the list of variables returned by simulate() or simulateEx() # #Example: rrPython.setTimeCourseSelectionList ("Time, S1, J1, J2") # #\param list A string of Ids separated by spaces or comma characters #\return Returns True if successful def setTimeCourseSelectionList(myList, rrHandle = None): if rrHandle is None: rrHandle = gHandle if type (myList) == str: return rrLib.setTimeCourseSelectionList(rrHandle, myList) if type (myList) == list: strList = '' for i in range (len(myList)): strList = strList + myList[i] + ' ' return rrLib.setTimeCourseSelectionList(rrHandle, strList) raise RuntimeError('Expecting string or list in setTimeCourseSelectionList') ##\brief Returns the list of variables returned by simulate() or simulateEx() #\return A list of symbol IDs indicating the currect selection list def getTimeCourseSelectionList(): value = rrLib.getTimeCourseSelectionList(gHandle) return stringArrayToList (value) ##\brief Carry out a time-course simulation, use setTimeStart etc to set #characteristics #\return Returns a handle to internal roadRunner data def simulate(aHandle = None): if aHandle is None: aHandle = gHandle return rrLib.simulate(aHandle) ##\brief Carry out a time-course simulation in a thread, use setTimeStart etc to set #characteristics #\return Returns a handle to the thread. Use this handle to see when the thread has finished #def simulateJob(aHandle = None): # if aHandle is None: # aHandle = gHandle # return rrLib.simulateJob(aHandle) ##\brief Check if a job is done #characteristics #\return Returns true/false indicating if a job has finsished #def isJobFinished(aHandle = None): # if aHandle is None: # aHandle = gHandle # return rrLib.isJobFinished(aHandle) ##\brief Carry out a time-course simulation for a thread pool #characteristics #\return Returns a handle to Jobs. 
Use this handle to see when the jobs have finished #def simulateJobs(rrsHandle, nrOfThreads): # return rrLib.simulateJobs(rrsHandle, nrOfThreads) def writeRRData(rrData, outFile): rrLib.writeRRData(rrData, outFile) def getSimulationResult(aHandle = None): if aHandle is None: aHandle = gHandle result = rrLib.getSimulationResult(aHandle) #TODO: Check result rowCount = rrLib.getRRDataNumRows(result) colCount = rrLib.getRRDataNumCols(result) resultArray = np.zeros((rowCount, colCount)) for m in range(rowCount): for n in range(colCount): rvalue = m cvalue = n value = c_double() if rrLib.getRRDataElement(result, rvalue, cvalue, pointer(value)) == True: resultArray[m, n] = value.value rrLib.freeRRData(result) return resultArray rrLib.createRRCData.restype = c_void_p def createRRCData(rrDataHandle): return rrLib.createRRCData(rrDataHandle) def getNPData(rrcDataHandle): rowCount = rrLib.getRRDataNumRows(rrcDataHandle) colCount = rrLib.getRRDataNumCols(rrcDataHandle) resultArray = np.zeros((rowCount, colCount)) for m in range(rowCount): for n in range(colCount): rvalue = m cvalue = n value = c_double() if rrLib.getRRCDataElement(rrcDataHandle, rvalue, cvalue, pointer(value)) == True: resultArray[m, n] = value.value return resultArray def getSimulationResult2(aHandle = None): if aHandle is None: aHandle = gHandle return rrLib.getSimulationResult(aHandle) def getRoadRunnerData(aHandle = None): if aHandle is None: aHandle = gHandle return rrLib.getRoadRunnerData(aHandle) #use getRRCDataElement and other helper routines to build array that can be used in numpy to plot with matplotlib #get num cols, get num rows, create array, fill array with two loops ##\brief Carry out a time-course simulation based on the given arguments # #Example: m = rrPython.simulateEx(0, 25, 200) # #\return Returns a handle to roadrunners internal data object rrLib.simulateEx.restype = c_void_p def simulateEx(timeStart, timeEnd, numberOfPoints): startValue = c_double(timeStart) endValue = 
c_double(timeEnd) nrPoints = c_int(numberOfPoints) return rrLib.simulateEx(gHandle, startValue, endValue, nrPoints) ##\brief Carry out a one step integration of the model # #Example: status = rrPython.oneStep(currentTime, stepSize) # #\param[in] currentTime The current time in the simulation #\param[in] stepSize The step size to use in the integration #\param[in] value The new time (currentTime + stepSize) #Takes (double, double) as an argument #\return def oneStep (currentTime, stepSize): #test this curtime = c_double(currentTime) stepValue = c_double(stepSize) value = c_double() if rrLib.oneStep(gHandle, (curtime), (stepValue), pointer(value)) == True: return value.value; else: raise RuntimeError('Index out of range') ##\brief Returns the simulation start time # #Example: status = rrPython.getTimeStart() # #\return Returns the simulation start time as a float def getTimeStart(): value = c_double() if rrLib.getTimeStart(gHandle, pointer(value)) == True: return value.value else: return ('Index out of Range') ##\brief Returns the simulation end time # #Example: status = rrPython.getTimeEnd() # #\return Returns the simulation end Time as a float def getTimeEnd(): value = c_double() if rrLib.getTimeEnd(gHandle, pointer(value)) == True: return value.value else: return ('Index out of Range') ##\brief Returns the value of the current number of points # #Example: status = rrPython.getNumPoints() # #\return Returns the value of the number of points def getNumPoints(): value = c_int() if rrLib.getNumPoints(gHandle, pointer(value)) == True: return value.value else: return ('Index out of Range') ##\brief Reset all floating species concentrations to their intial conditions # #Example: status = rrPython.reset() # #\return Returns True if successful def reset(): return rrLib.reset(gHandle) ##@} ##\ingroup steadystate #@{ ##\brief Computes the steady state of the loaded model # #Example: status = rrPython.steadyState() # #\return Returns a value that is set during the call that 
indicates how close the solution is to the steady state. The smaller the value, the better. def steadyState(): value = c_double() if rrLib.steadyState(gHandle, pointer(value)) == True: return value.value else: return (GetLastError()) ##\brief A convenient method for returning a vector of the steady state species concentrations # #Example: values = rrPython.computeSteadyStateValues() # #\return Returns the vector of steady state values or NONE if an error occurred. def computeSteadyStateValues(): values = rrLib.computeSteadyStateValues(gHandle) if values == None: raise RuntimeError(getLastError()) return rrVectorToPythonArray (values) ##\brief Set the selection list of the steady state analysis # #param[in] list The string argument should be a space-separated list of symbols in the selection list # #\return Returns True if successful def setSteadyStateSelectionList(aList): if type (aList) == str: value = c_char_p(aList) return rrLib.setSteadyStateSelectionList(gHandle, value) if type (aList) == list: strList = '' for i in range (len(aList)): strList = strList + aList[i] + ' ' value = c_char_p (strList) return rrLib.setSteadyStateSelectionList(gHandle, strList) raise RuntimeError('Expecting string or list in setTimeCourseSelectionList') ##\brief Get the selection list for the steady state analysis #\return Returns False if it fails, otherwise it returns a list of strings representing symbols in the selection list def getSteadyStateSelectionList(): value = rrLib.getSteadyStateSelectionList(gHandle) return stringArrayToList (value) ##@} ##\ingroup state #@{ ##\brief Get the value for a given symbol, use getAvailableSymbols() for a list of symbols # #Example: status = rrPython.getValue("S1") # #\param symbolId The symbol that we wish to obtain the value for #\return Returns the value if successful, otherwise returns False def getValue(symbolId, rrHandle = None): if rrHandle is None: rrHandle = gHandle value = c_double() if rrLib.getValue(rrHandle, symbolId, 
pointer(value)) == True: return value.value else: raise RuntimeError(getLastError() + ': ' + symbolId) ##\brief Set the value for a given symbol, use getAvailableSymbols() for a list of symbols # #Example: status = rrPython.setValue("S1", 0.5) # #\param symbolId The symbol that we wish to set the value for #\param value The value that the symbol will be set to #\return Returns True if successful def setValue(symbolId, value, rrHandle = None): if rrHandle is None: rrHandle = gHandle if rrLib.setValue(rrHandle, symbolId, c_double(value)) == True: return True else: raise RuntimeError('Index out of range') ##@} ##\ingroup floating #@{ ##\brief Retrieve a string containing concentrations for all the floating species # #Example: values = rrPython.getFloatingSpeciesConcentrations() # #\return Returns a string of floating species concentrations or None if an error occured def getFloatingSpeciesConcentrations(): values = rrLib.getFloatingSpeciesConcentrations(gHandle) return rrVectorToPythonArray (values) ##\brief Sets the concentration for a floating species by its index. Species are indexed starting at 0. # #Example: rrPython.setFloatingSpeciesByIndex(0, .5) # #\param index The index to the floating species (corresponds to position in getFloatingSpeciesIds()) starting at 0 #\param value The concentration of the species to set #\return Returns True if successful def setFloatingSpeciesByIndex(index, value):
# by <EMAIL> at Thu Nov 28 22:09:47 CET 2019
"""GPG-compatible symmetric key encryption and decryption code.

Imports PyCrypto.Cipher._* or tinygpgs.cipher lazily.
Imports hashlib or tinygpgs.hash lazily.
"""

import binascii
import struct

from tinygpgs.pyx import iteritems, buffer, binary_type, xrange, to_hex_str, is_buffer_slice, is_buffer_join, ensure_binary, callable, is_python_function, ensure_str, int_from_bytes_be

from tinygpgs.strxor import make_strxor, fast_strxor


def new_hash(hash, data=b'', is_slow_hash=False, _slow_hashes={}):
  """Return a hash object for `hash`, trying fast implementations first.

  Lookup order (unless is_slow_hash is true): hashlib (Python >= 2.5),
  then the PyCrypto C extension Crypto.Hash._<NAME>, then the Python 2.4
  `sha`/`md5` modules for those two algorithms. As a last resort the pure
  Python implementations in tinygpgs.hash (classes named Slow_<name>) are
  used.

  Args:
    hash: Hash algorithm name, e.g. 'sha1', 'SHA-256'. Dashes are removed
        and the name is lowercased before lookup.
    data: Initial bytes to feed to the new hash object.
    is_slow_hash: If true, skip hashlib/PyCrypto and go straight to the
        pure Python implementations in tinygpgs.hash.
    _slow_hashes: Internal memo cache; deliberate mutable default shared
        across calls so tinygpgs.hash is imported at most once. Do not
        pass this argument.

  Returns:
    A hash object supporting .update(...) and .digest().

  Raises:
    ValueError: If the hash algorithm is not supported by any backend.
  """
  hash = hash.replace('-', '').lower()
  if not is_slow_hash:
    try:
      import hashlib  # Python >= 2.5.
      return hashlib.new(hash, data)
    except (ImportError, ValueError):
      # ValueError: hashlib exists but doesn't know this algorithm name;
      # fall through to the other backends.
      pass
    # .upper() happens to work for SLOW_HASHES.keys().
    module_name = 'Crypto.Hash._' + hash.upper()
    import sys
    try:
      __import__(module_name)
      digest_cons = sys.modules[module_name].new
    except (ImportError, AttributeError):
      digest_cons = None
    if callable(digest_cons):
      return digest_cons(data)
    if hash == 'sha1':
      try:  # Python 2.4.
        import sha
        return sha.sha(data)
      except ImportError:
        pass
    if hash == 'md5':
      try:  # Python 2.4.
        import md5
        return md5.md5(data)
      except ImportError:
        pass
  if not _slow_hashes:
    try:
      # Lazy import of tinygpgs.hash makes startup with PyCrypto fast.
      from tinygpgs import hash as hash_mod
      _slow_hashes.update(hash_mod.__dict__)
    except ImportError:
      _slow_hashes[0] = 0  # Not empty.
  digest_cons = _slow_hashes.get('Slow_' + hash)
  if callable(digest_cons):
    return digest_cons(data)
  raise ValueError('Unsupported hash: %s' % hash)  # Never happens.


class BadCfbCipher(ValueError):
  """Raised when a cipher cannot be created in CFB mode."""


# {name: (module_name, block_size, keytable_size)}.
CIPHER_INFOS = {
    'idea': ('IDEA', 8, 16),  # Doesn't exist in PyCrypto <=2.7.
    'cast5': ('CAST', 8, 16),
    'cast5-128': ('CAST', 8, 16),
    'cast128': ('CAST', 8, 16),
    'aes-128': ('AES', 16, 16),
    'aes-192': ('AES', 16, 24),
    'aes-256': ('AES', 16, 32),
    'des3': ('DES3', 8, 24),
    '3des': ('DES3', 8, 24),
    'des': ('DES', 8, 8),
    'blowfish': ('Blowfish', 8, 16),
    'twofish-128': ('Twofish', 16, 16),  # Doesn't exist in PyCrypto <=2.7.
    'twofish-256': ('Twofish', 16, 32),  # Doesn't exist in PyCrypto <=2.7.
}


def get_cipher_cons(cipher, is_slow_cipher, is_cfb, _slow_ciphers={}):
  """Returns (cons, block_size, keytable_size).

  Looks up the named cipher in CIPHER_INFOS, then resolves a block-cipher
  constructor: the PyCrypto C extension Crypto.Cipher._<Module> first
  (unless is_slow_cipher), falling back to the pure Python implementations
  in tinygpgs.cipher.

  Args:
    cipher: Cipher name, e.g. 'aes-256'; lowercased before lookup. Must be
        a key of CIPHER_INFOS.
    is_slow_cipher: If true, skip PyCrypto and use only the pure Python
        fallback implementations.
    is_cfb: If true, the caller needs native CFB mode support, which the
        slow fallback ciphers don't provide.
    _slow_ciphers: Internal memo cache; deliberate mutable default shared
        across calls so tinygpgs.cipher is imported at most once. Do not
        pass this argument.

  Returns:
    (cons, block_size, keytable_size) where cons is a cipher constructor,
    and block_size / keytable_size are the sizes from CIPHER_INFOS, in
    bytes.

  Raises:
    ValueError: If the cipher name is unknown or no implementation is
        available.
    BadCfbCipher: If is_cfb is true but only a non-CFB-capable (slow or
        fallback) implementation would be used.
  """
  cipher = cipher.lower()
  info = CIPHER_INFOS.get(cipher)
  if not info:
    raise ValueError('Unknown cipher: %s' % cipher)
  module_name, block_size, keytable_size = info
  if is_slow_cipher and is_cfb:
    raise BadCfbCipher('Slow cipher %s does not support CFB.' % cipher)
  if not is_slow_cipher:
    cname = 'Crypto.Cipher._' + module_name
    cons = None
    import sys
    try:
      __import__(cname)
      cons = getattr(sys.modules[cname], 'new', None)
    except ImportError:
      pass
    if callable(cons):  # Fast, C extension.
      return cons, block_size, keytable_size
    # NOTE(review): 'aes' is not a key of CIPHER_INFOS (only 'aes-128' etc.),
    # so this branch looks unreachable as written — confirm whether the
    # intent was to match module_name == 'AES'.
    if cipher == 'aes':  # Fallback fast C, extension.
      cons2 = None
      try:
        cons2 = getattr(__import__('aes'), 'Keysetup', None)
      except ImportError:  # https://pypi.org/project/alo-aes/
        cons2 = None
      if callable(cons2):
        cons = cons2
  if not _slow_ciphers:
    try:
      # Lazy import of tinygpgs.cipher makes startup with PyCrypto fast.
      from tinygpgs import cipher as cipher_mod
      _slow_ciphers.update(cipher_mod.__dict__)
    except ImportError:
      _slow_ciphers[0] = 0  # Not empty.
  fallback_cons = _slow_ciphers.get(module_name)
  if callable(fallback_cons):
    if is_cfb:
      raise BadCfbCipher('Fallback cipher %s does not support CFB.' % cipher)
    return fallback_cons, block_size, keytable_size
  raise ValueError('Unimplemented cipher: %s' % cipher)  # Never happens.


# --- GPG misc: used for both encryption and decryption.
CRC24_TABLE = ( 0x000000, 0x864cfb, 0x8ad50d, 0x0c99f6, 0x93e6e1, 0x15aa1a, 0x1933ec, 0x9f7f17, 0xa18139, 0x27cdc2, 0x2b5434, 0xad18cf, 0x3267d8, 0xb42b23, 0xb8b2d5, 0x3efe2e, 0xc54e89, 0x430272, 0x4f9b84, 0xc9d77f, 0x56a868, 0xd0e493, 0xdc7d65, 0x5a319e, 0x64cfb0, 0xe2834b, 0xee1abd, 0x685646, 0xf72951, 0x7165aa, 0x7dfc5c, 0xfbb0a7, 0x0cd1e9, 0x8a9d12, 0x8604e4, 0x00481f, 0x9f3708, 0x197bf3, 0x15e205, 0x93aefe, 0xad50d0, 0x2b1c2b, 0x2785dd, 0xa1c926, 0x3eb631, 0xb8faca, 0xb4633c, 0x322fc7, 0xc99f60, 0x4fd39b, 0x434a6d, 0xc50696, 0x5a7981, 0xdc357a, 0xd0ac8c, 0x56e077, 0x681e59, 0xee52a2, 0xe2cb54, 0x6487af, 0xfbf8b8, 0x7db443, 0x712db5, 0xf7614e, 0x19a3d2, 0x9fef29, 0x9376df, 0x153a24, 0x8a4533, 0x0c09c8, 0x00903e, 0x86dcc5, 0xb822eb, 0x3e6e10, 0x32f7e6, 0xb4bb1d, 0x2bc40a, 0xad88f1, 0xa11107, 0x275dfc, 0xdced5b, 0x5aa1a0, 0x563856, 0xd074ad, 0x4f0bba, 0xc94741, 0xc5deb7, 0x43924c, 0x7d6c62, 0xfb2099, 0xf7b96f, 0x71f594, 0xee8a83, 0x68c678, 0x645f8e, 0xe21375, 0x15723b, 0x933ec0, 0x9fa736, 0x19ebcd, 0x8694da, 0x00d821, 0x0c41d7, 0x8a0d2c, 0xb4f302, 0x32bff9, 0x3e260f, 0xb86af4, 0x2715e3, 0xa15918, 0xadc0ee, 0x2b8c15, 0xd03cb2, 0x567049, 0x5ae9bf, 0xdca544, 0x43da53, 0xc596a8, 0xc90f5e, 0x4f43a5, 0x71bd8b, 0xf7f170, 0xfb6886, 0x7d247d, 0xe25b6a, 0x641791, 0x688e67, 0xeec29c, 0x3347a4, 0xb50b5f, 0xb992a9, 0x3fde52, 0xa0a145, 0x26edbe, 0x2a7448, 0xac38b3, 0x92c69d, 0x148a66, 0x181390, 0x9e5f6b, 0x01207c, 0x876c87, 0x8bf571, 0x0db98a, 0xf6092d, 0x7045d6, 0x7cdc20, 0xfa90db, 0x65efcc, 0xe3a337, 0xef3ac1, 0x69763a, 0x578814, 0xd1c4ef, 0xdd5d19, 0x5b11e2, 0xc46ef5, 0x42220e, 0x4ebbf8, 0xc8f703, 0x3f964d, 0xb9dab6, 0xb54340, 0x330fbb, 0xac70ac, 0x2a3c57, 0x26a5a1, 0xa0e95a, 0x9e1774, 0x185b8f, 0x14c279, 0x928e82, 0x0df195, 0x8bbd6e, 0x872498, 0x016863, 0xfad8c4, 0x7c943f, 0x700dc9, 0xf64132, 0x693e25, 0xef72de, 0xe3eb28, 0x65a7d3, 0x5b59fd, 0xdd1506, 0xd18cf0, 0x57c00b, 0xc8bf1c, 0x4ef3e7, 0x426a11, 0xc426ea, 0x2ae476, 0xaca88d, 0xa0317b, 0x267d80, 0xb90297, 0x3f4e6c, 
0x33d79a, 0xb59b61, 0x8b654f, 0x0d29b4, 0x01b042, 0x87fcb9, 0x1883ae, 0x9ecf55, 0x9256a3, 0x141a58, 0xefaaff, 0x69e604, 0x657ff2, 0xe33309, 0x7c4c1e, 0xfa00e5, 0xf69913, 0x70d5e8, 0x4e2bc6, 0xc8673d, 0xc4fecb, 0x42b230, 0xddcd27, 0x5b81dc, 0x57182a, 0xd154d1, 0x26359f, 0xa07964, 0xace092, 0x2aac69, 0xb5d37e, 0x339f85, 0x3f0673, 0xb94a88, 0x87b4a6, 0x01f85d, 0x0d61ab, 0x8b2d50, 0x145247, 0x921ebc, 0x9e874a, 0x18cbb1, 0xe37b16, 0x6537ed, 0x69ae1b, 0xefe2e0, 0x709df7, 0xf6d10c, 0xfa48fa, 0x7c0401, 0x42fa2f, 0xc4b6d4, 0xc82f22, 0x4e63d9, 0xd11cce, 0x575035, 0x5bc9c3, 0xdd8538, ) if b'\0'[0]: # Python 2, iterating over str yields 1-char strs. def crc24(data, crc=0xb704ce): if not isinstance(data, (binary_type, buffer)): raise TypeError table = CRC24_TABLE for c in data: crc = (table[((crc >> 16) ^ ord(c)) & 0xff] ^ (crc << 8)) & 0x00ffffff return crc else: def crc24(data, crc=0xb704ce): if not isinstance(data, (binary_type, buffer)): raise TypeError table = CRC24_TABLE for c in data: crc = (table[((crc >> 16) ^ c) & 0xff] ^ (crc << 8)) & 0x00ffffff return crc def str_to_fread(data): if not isinstance(data, (binary_type, buffer)): raise TypeError i_ary = [0] def fread_from_str(size): i = i_ary[0] result = data[i : i + size] i_ary[0] = i + len(result) return result return fread_from_str # GPG defaults: https://en.wikipedia.org/wiki/GNU_Privacy_Guard says that # CAST5 has been the default in GPG <2.1 (confirmed for 1.0.6 in # 2001-06-01). In 2.1, AES-128 became the default. In 2.2, AES-256 became # the default (confirmed for 2.2.17 in 2019-07-09). CIPHER_ALGOS = { # --cipher-algo=... . #0: 'unencrypted', # gpg(1) can't decrypt it either. 1: 'idea', 2: '3des', # With 192-bit key. 3: 'cast5', # With 128-bit key. 4: 'blowfish', # With 128-bit key, 16 rounds. 7: 'aes-128', 8: 'aes-192', 9: 'aes-256', 10: 'twofish-256', 302: 'des', # Single-key, 56-bit (64-bit?) DES. 
303: 'twofish-128', } # https://github.com/gpg/libgcrypt/blob/e5c4cf0efb8fd297963e6b4392ab98c41dbad536/src/gcrypt.h.in#L919 CIPHER_ALGOS_ALL = { 0: 'unencrypted', # gpg(1) can't decrypt it either. 1: 'idea', 2: '3des', # With 192-bit key. 3: 'cast5', # With 128-bit key. 4: 'blowfish', # With 128-bit key, 16 rounds. 5: 'safer-sk128', 6: 'des-sk', 7: 'aes-128', 8: 'aes-192', 9: 'aes-256', 10: 'twofish-256', 301: 'rc4', # 'arcfour'. 302: 'des', # Single-key, 56-bit (64-bit?) DES. 303: 'twofish-128', 304: 'serpent-128', 305: 'serpent-192', 306: 'serpent-256', 307: 'rfc2268-ron-40', 308: 'rfc2268-ron-128', 309: 'rfc4269-seed', 310: 'camellia-128', 311: 'camellia-192', 312: 'camellia-256', 313: 'salsa20', 314: 'salsa20-r12', 315: 'rfc-5830-gost', # GOST 28147-89. 316: 'chacha20', } KEYTABLE_SIZES = dict((_k, CIPHER_INFOS[_v][2]) for _k, _v in iteritems(CIPHER_ALGOS)) DIGEST_ALGOS = { # --digest-algo=..., --s2k-digest-algo=... . 1: 'md5', 2: 'sha1', 3: 'ripemd160', 8: 'sha256', 9: 'sha384', 10: 'sha512', 11: 'sha224', } S2K_MODES = { 0: 'simple', 1: 'salted', 3: 'iterated-salted', } COMPRESS_ALGOS = { 0: 'uncompressed', 1: 'zip', 2: 'zlib', 3: 'bzip2', } def iter_to_fread(iter_str): _buffer = buffer data_ary, iter_str = [_buffer(b'')], iter(iter_str) do_binary = is_buffer_slice do_binary_before_join = is_buffer_slice and not is_buffer_join def fread_from_iter(size): data = data_ary[0] if isinstance(size, tuple): # ``Give me whatever you have buffered''. min_size, mod = size ld = len(data) ldm = ld - ld % mod if ldm < min_size: return b'' result, data_ary[0] = _buffer(data, 0, ldm), _buffer(data, ldm) return result if size <= 0: return b'' result, data = data[:size], _buffer(data, size) if len(result) < size: remaining = size - len(result) result = [result] while remaining > 0: for data in iter_str: break else: break # Out of the `while' loop. # This may be a long copy, but there is no way around it. 
result.append(data[:remaining]) data = _buffer(data, len(result[-1])) remaining -= len(result[-1]) if do_binary_before_join: result = b''.join(map(binary_type, result)) else: # Python 2.x and >=3.4. result = b''.join(result) elif do_binary: result = binary_type(result) data_ary[0] = data return result return fread_from_iter def iter_to_fread_or_all(iter_str): _buffer = buffer data_ary, iter_str = [_buffer(b'')], iter(iter_str) do_binary = is_buffer_slice do_binary_before_join = is_buffer_slice and not is_buffer_join def fread_from_iter(size=()): data = data_ary[0] if size == (): # Read everything until EOF. result, data_ary[0] = [data[:]], b'' # This may be a long copy, but there is no way around it. result.extend(iter_str) return b''.join(result) if size <= 0: return b'' result, data = data[:size], _buffer(data, size) if len(result) < size: remaining = size - len(result) result = [result] while remaining > 0: for data in iter_str: break else: break # Out of the `while' loop. # This may be a long copy, but there is
from operator import attrgetter import numpy as np from neupy.utils import format_data from neupy.exceptions import StopTraining from neupy.algorithms.base import BaseNetwork from neupy.core.properties import (NumberProperty, ProperFractionProperty, IntProperty) __all__ = ('GrowingNeuralGas', 'NeuralGasGraph', 'NeuronNode') def sample_data_point(data, n=1): indeces = np.random.choice(len(data), n) return data[indeces, :] def make_edge_id(node_1, node_2): return (node_1, node_2) if node_1 < node_2 else (node_2, node_1) class NeuralGasGraph(object): """ Undirected graph structure that stores neural gas network's neurons and connections between them. Attributes ---------- edges_per_node : dict Dictionary that where key is a unique node and value is a list of nodes that connection to this edge. edges : dict Dictonary that stores age per each connection. Ech key will have the following format: ``(node_1, node_2)``. nodes : list List of all nodes in the graph. n_nodes : int Number of nodes in the network. """ def __init__(self): self.edges_per_node = {} self.edges = {} @property def nodes(self): return list(self.edges_per_node.keys()) @property def n_nodes(self): return len(self.edges_per_node) def add_node(self, node): self.edges_per_node[node] = set() def remove_node(self, node): del self.edges_per_node[node] def add_edge(self, node_1, node_2): self.edges_per_node[node_1].add(node_2) self.edges_per_node[node_2].add(node_1) edge_id = make_edge_id(node_1, node_2) self.edges[edge_id] = 0 def remove_edge(self, node_1, node_2): self.edges_per_node[node_1].remove(node_2) self.edges_per_node[node_2].remove(node_1) edge_id = make_edge_id(node_1, node_2) del self.edges[edge_id] class NeuronNode(object): """ Structure representes neuron in the Neural Gas algorithm. Attributes ---------- weight : 2d-array Neuron's position in the space. error : float Error accumulated during the training. 
""" def __init__(self, weight): self.weight = weight self.error = 0 def __lt__(self, other): return id(self) < id(other) class GrowingNeuralGas(BaseNetwork): """ Growing Neural Gas (GNG) algorithm. Current algorithm has two modifications that hasn't been mentioned in the paper, but they help to speed up training. - The ``n_start_nodes`` parameter provides possibility to increase number of nodes during initialization step. It's usefull when algorithm takes a lot of time building up large amount of neurons. - The ``min_distance_for_update`` parameter allows to speed up training when some data samples has eurons very close to them. The ``min_distance_for_update`` parameter controls threshold for the minimum distance for which we will want to update weights. Parameters ---------- n_inputs : int Number of features in each sample. n_start_nodes : int Number of nodes that algorithm generates from the data during the initialization step. Defaults to ``2``. step : float Step (learning rate) for the neuron winner. Defaults to ``0.2``. neighbour_step : float Step (learning rate) for the neurons that connected via edges with neuron winner. This value typically has to be smaller than ``step`` value. Defaults to ``0.05``. max_edge_age : int It means that if edge won't be updated for ``max_edge_age`` iterations than it would be removed. The larger the value the more updates we allow to do before removing edge. Defaults to ``100``. n_iter_before_neuron_added : int Each ``n_iter_before_neuron_added`` weight update algorithm add new neuron. The smaller the value the more frequently algorithm adds new neurons to the network. Defaults to ``1000``. error_decay_rate : float This error decay rate would be applied to every neuron in the graph after each training iteration. It ensures that old errors will be reduced over time. Defaults to ``0.995``. after_split_error_decay_rate : float This decay rate reduces error for neurons with largest errors after algorithm added new neuron. 
This value typically lower than ``error_decay_rate``. Defaults to ``0.5``. max_nodes : int Maximum number of nodes that would be generated during the training. This parameter won't stop training when maximum number of nodes will be exceeded. Defaults to ``1000``. min_distance_for_update : float Parameter controls for which neurons we want to apply updates. In case if euclidian distance between data sample and closest neurons will be less than the ``min_distance_for_update`` value than update would be skipped for this data sample. Setting value to zero will disable effect provided by this parameter. Defaults to ``0``. {BaseNetwork.show_epoch} {BaseNetwork.shuffle_data} {BaseNetwork.epoch_end_signal} {BaseNetwork.train_end_signal} {Verbose.verbose} Methods ------- train(input_train, summary='table', epochs=100) Network learns topological structure of the data. Learned topolog is stored in the ``graph`` attribute. {BaseSkeleton.fit} initialize_nodes(data) Network initializes nodes randomly sampling ``n_start_nodes`` from the data. It would be applied automatically before the training in case if graph is empty. Note: Node re-initialization can reset network. Notes ----- - Unlike other algorithms this network doesn't make predictions. Intead, it learns topological structure of the data in form of the graph. After that training, stucture of the network can be extracted from the ``graph`` attribute. - In order to speed up training, it might be useful to increase the ``n_start_nodes`` parameter. - During the training it happens that nodes learn topological structure of one part of the data better than the other, mostly because of the different data sample density in different places. Increasing the ``min_distance_for_update`` can speed up training ignoring updates for the neurons that very close to the data sample. (below specified ``min_distance_for_update`` value). Training can be stopped in case if none of the neurons has been updated during the training epoch. 
Attributes ---------- graph : NeuralGasGraph instance This attribute stores all neurons and connections between them in the form of undirected graph. {BaseNetwork.Attributes} Examples -------- >>> from neupy import algorithms >>> from sklearn.datasets import make_blobs >>> >>> data, _ = make_blobs( ... n_samples=1000, ... n_features=2, ... centers=2, ... cluster_std=0.4, ... ) >>> >>> neural_gas = algorithms.GrowingNeuralGas( ... n_inputs=2, ... shuffle_data=True, ... verbose=True, ... max_edge_age=10, ... n_iter_before_neuron_added=50, ... max_nodes=100, ... ) >>> neural_gas.graph.n_nodes 100 >>> len(neural_gas.graph.edges) 175 >>> edges = list(neural_gas.graph.edges.keys()) >>> neuron_1, neuron_2 = edges[0] >>> >>> neuron_1.weight array([[-6.77166299, 2.4121606 ]]) >>> neuron_2.weight array([[-6.829309 , 2.27839633]]) References ---------- [1] A Growing Neural Gas Network Learns Topologies, <NAME> """ n_inputs = IntProperty(minval=1, required=True) n_start_nodes = IntProperty(minval=2, default=2) step = NumberProperty(default=0.2, minval=0) neighbour_step = NumberProperty(default=0.05, minval=0) max_edge_age = IntProperty(default=100, minval=1) max_nodes = IntProperty(default=1000, minval=1) n_iter_before_neuron_added = IntProperty(default=1000, minval=1) after_split_error_decay_rate = ProperFractionProperty(default=0.5) error_decay_rate = ProperFractionProperty(default=0.995) min_distance_for_update = NumberProperty(default=0.0, minval=0) def __init__(self, *args, **kwargs): super(GrowingNeuralGas, self).__init__(*args, **kwargs) self.n_updates = 0 self.graph = NeuralGasGraph() def format_input_data(self, input_data): is_feature1d = self.n_inputs == 1 input_data = format_data(input_data, is_feature1d) if input_data.ndim != 2: raise ValueError("Cannot make prediction, because input " "data has more than 2 dimensions") n_samples, n_features = input_data.shape if n_features != self.n_inputs: raise ValueError("Input data expected to have {} features, " "but got 
{}".format(self.n_inputs, n_features)) return input_data def initialize_nodes(self, data): self.graph = NeuralGasGraph() for sample in sample_data_point(data, n=self.n_start_nodes): self.graph.add_node(NeuronNode(sample.reshape(1, -1))) def train(self, input_train, summary='table', epochs=100): input_train = self.format_input_data(input_train) if not self.graph.nodes: self.initialize_nodes(input_train) return super(GrowingNeuralGas, self).train( input_train=input_train, target_train=None, input_test=None, target_test=None, epochs=epochs, epsilon=None, summary=summary) def train_epoch(self, input_train, target_train=None): graph = self.graph step = self.step neighbour_step = self.neighbour_step max_nodes = self.max_nodes max_edge_age = self.max_edge_age error_decay_rate = self.error_decay_rate after_split_error_decay_rate = self.after_split_error_decay_rate n_iter_before_neuron_added = self.n_iter_before_neuron_added # We square this value, because we deal with # squared distances during the training. 
min_distance_for_update = np.square(self.min_distance_for_update) n_samples = len(input_train) total_error = 0 did_update = False for sample in input_train: nodes = graph.nodes weights = np.concatenate([node.weight for node in nodes]) distance = np.linalg.norm(weights - sample, axis=1) neuron_ids = np.argsort(distance) closest_neuron_id, second_closest_id = neuron_ids[:2] closest_neuron = nodes[closest_neuron_id] second_closest = nodes[second_closest_id] total_error += distance[closest_neuron_id] if distance[closest_neuron_id] < min_distance_for_update: continue self.n_updates += 1 did_update = True closest_neuron.error += distance[closest_neuron_id] closest_neuron.weight += step * (sample - closest_neuron.weight) graph.add_edge(closest_neuron, second_closest) for to_neuron in list(graph.edges_per_node[closest_neuron]): edge_id = make_edge_id(to_neuron, closest_neuron) age = graph.edges[edge_id] if age >= max_edge_age: graph.remove_edge(to_neuron, closest_neuron) if not graph.edges_per_node[to_neuron]: graph.remove_node(to_neuron) else: graph.edges[edge_id] += 1 to_neuron.weight += neighbour_step * ( sample - to_neuron.weight) time_to_add_new_neuron = ( self.n_updates % n_iter_before_neuron_added == 0 and graph.n_nodes < max_nodes) if time_to_add_new_neuron: nodes = graph.nodes largest_error_neuron = max(nodes, key=attrgetter('error')) neighbour_neuron = max( graph.edges_per_node[largest_error_neuron], key=attrgetter('error')) largest_error_neuron.error *= after_split_error_decay_rate neighbour_neuron.error *= after_split_error_decay_rate new_weight = 0.5 * ( largest_error_neuron.weight + neighbour_neuron.weight ) new_neuron = NeuronNode(weight=new_weight.reshape(1, -1)) graph.remove_edge(neighbour_neuron, largest_error_neuron) graph.add_node(new_neuron) graph.add_edge(largest_error_neuron, new_neuron) graph.add_edge(neighbour_neuron, new_neuron) for node in graph.nodes: node.error *= error_decay_rate if not did_update and min_distance_for_update != 0 and n_samples 
> 1: raise StopTraining( "Distance between every data sample and neurons, closest " "to them, is less then {}".format(min_distance_for_update)) return total_error / n_samples def predict(self, *args, **kwargs): raise NotImplementedError( "Growing Neural Gas algorithm doesn't make prediction. " "It only learns graph structure
**custom_properties): # Add agent as normal self.add_agent(location, agent, name, customizable_properties, sense_capability, is_traversable, team, possible_actions, is_movable, visualize_size, visualize_shape, visualize_colour, visualize_depth, visualize_opacity, **custom_properties) # Get the last settings (which we just added) and add the probability self.agent_settings[-1]['probability'] = probability def add_object(self, location, name, callable_class=None, customizable_properties=None, is_traversable=None, is_movable=None, visualize_size=None, visualize_shape=None, visualize_colour=None, visualize_depth=None, visualize_opacity=None, **custom_properties): if callable_class is None: callable_class = EnvObject # Check if location and agent are of correct type assert isinstance(location, list) or isinstance(location, tuple) assert isinstance(callable_class, Callable) # Load default parameters if not passed if is_movable is None: is_movable = get_default_value(class_name="EnvObject", property_name="is_movable") # If default variables are not given, assign them (most empty, except of sense_capability that defaults to all # objects with infinite range). 
if custom_properties is None: custom_properties = {} if customizable_properties is None: customizable_properties = [] # Define a settings dictionary with all we need to register and add an agent to the GridWorld object_setting = {"callable_class": callable_class, "custom_properties": custom_properties, "customizable_properties": customizable_properties, "mandatory_properties": { "name": name, "is_traversable": is_traversable, "visualize_size": visualize_size, "visualize_shape": visualize_shape, "visualize_colour": visualize_colour, "visualize_depth": visualize_depth, "visualize_opacity": visualize_opacity, "is_movable": is_movable, "location": location} } self.object_settings.append(object_setting) def add_object_prospect(self, location, name, probability, callable_class=None, customizable_properties=None, is_traversable=None, visualize_size=None, visualize_shape=None, visualize_colour=None, visualize_depth=None, visualize_opacity=None, **custom_properties): # Add object as normal self.add_object(location, name, callable_class, customizable_properties, is_traversable, visualize_size, visualize_shape, visualize_colour, visualize_depth, visualize_opacity, **custom_properties) # Get the last settings (which we just added) and add the probability self.object_settings[-1]['probability'] = probability def add_multiple_objects(self, locations, names=None, callable_classes=None, custom_properties=None, customizable_properties=None, is_traversable=None, visualize_sizes=None, visualize_shapes=None, visualize_colours=None, visualize_depths=None, visualize_opacities=None, is_movable=None): # If any of the lists are not given, fill them with None and if they are a single value of its expected type we # copy it in a list. A none value causes the default value to be loaded. 
if is_movable is None: is_movable = [None for _ in range(len(locations))] elif isinstance(is_movable, bool): is_movable = [is_movable for _ in range(len(locations))] if callable_classes is None: callable_classes = [EnvObject for _ in range(len(locations))] elif isinstance(callable_classes, Callable): callable_classes = [callable_classes for _ in range(len(locations))] if names is None: names = [callable_class.__name__ for callable_class in callable_classes] elif isinstance(names, str): names = [names for _ in range(len(locations))] if custom_properties is None: custom_properties = [{} for _ in range(len(locations))] elif isinstance(custom_properties, dict): custom_properties = [custom_properties for _ in range(len(locations))] if customizable_properties is None: customizable_properties = [None for _ in range(len(locations))] elif not any(isinstance(el, list) for el in customizable_properties): customizable_properties = [customizable_properties for _ in range(len(locations))] if is_traversable is None: is_traversable = [None for _ in range(len(locations))] elif isinstance(is_traversable, bool): is_traversable = [is_traversable for _ in range(len(locations))] if visualize_sizes is None: visualize_sizes = [None for _ in range(len(locations))] elif isinstance(visualize_sizes, int): visualize_sizes = [visualize_sizes for _ in range(len(locations))] if visualize_shapes is None: visualize_shapes = [None for _ in range(len(locations))] elif isinstance(visualize_shapes, int): visualize_shapes = [visualize_shapes for _ in range(len(locations))] if visualize_colours is None: visualize_colours = [None for _ in range(len(locations))] elif isinstance(visualize_colours, str): visualize_colours = [visualize_colours for _ in range(len(locations))] if visualize_opacities is None: visualize_opacities = [None for _ in range(len(locations))] elif isinstance(visualize_opacities, int): visualize_opacities = [visualize_opacities for _ in range(len(locations))] if visualize_depths is None: 
visualize_depths = [None for _ in range(len(locations))] elif isinstance(visualize_depths, str): visualize_depths = [visualize_depths for _ in range(len(locations))] # Loop through all agents and add them for idx in range(len(locations)): self.add_object(location=locations[idx], name=names[idx], callable_class=callable_classes[idx], customizable_properties=customizable_properties[idx], is_traversable=is_traversable[idx], is_movable=is_movable[idx], visualize_size=visualize_sizes[idx], visualize_shape=visualize_shapes[idx], visualize_colour=visualize_colours[idx], visualize_depth=visualize_depths[idx], visualize_opacity=visualize_opacities[idx], **custom_properties[idx]) def add_human_agent(self, location, agent, name="HumanAgent", customizable_properties=None, sense_capability=None, is_traversable=None, team=None, possible_actions=None, is_movable=None, visualize_size=None, visualize_shape=None, visualize_colour=None, visualize_depth=None, visualize_opacity=None, key_action_map=None, **custom_properties): # Check if location and agent are of correct type assert isinstance(location, list) or isinstance(location, tuple) assert isinstance(agent, HumanAgentBrain) for existingAgent in self.agent_settings: if existingAgent["mandatory_properties"]["name"] == name: raise Exception(f"A human agent with the name {name} was already added. Agent names should be unique.", name) # Load the defaults for any variable that is not defined # Obtain any defaults from the defaults.json file if not set already. 
if is_traversable is None: is_traversable = get_default_value(class_name="AgentBody", property_name="is_traversable") if visualize_size is None: visualize_size = get_default_value(class_name="AgentBody", property_name="visualize_size") if visualize_shape is None: visualize_shape = get_default_value(class_name="AgentBody", property_name="visualize_shape") if visualize_colour is None: visualize_colour = get_default_value(class_name="AgentBody", property_name="visualize_colour") if visualize_opacity is None: visualize_opacity = get_default_value(class_name="AgentBody", property_name="visualize_opacity") if visualize_depth is None: visualize_depth = get_default_value(class_name="AgentBody", property_name="visualize_depth") if possible_actions is None: possible_actions = get_default_value(class_name="AgentBody", property_name="possible_actions") if is_movable is None: is_movable = get_default_value(class_name="AgentBody", property_name="is_movable") # If default variables are not given, assign them (most empty, except of sense_capability that defaults to all # objects with infinite range). if custom_properties is None: custom_properties = {} if sense_capability is None: sense_capability = create_sense_capability([], []) # Create sense capability that perceives all if customizable_properties is None: customizable_properties = [] # Check if the agent is of HumanAgent, if not; use the add_agent method inh_path = get_inheritence_path(agent.__class__) if 'HumanAgent' not in inh_path: Exception(f"You are adding an agent that does not inherit from HumanAgent with the name {name}. 
Use " f"factory.add_agent to add autonomous agents.") # Append the user input map to the custom properties custom_properties["key_action_map"] = key_action_map # Define a settings dictionary with all we need to register and add an agent to the GridWorld hu_ag_setting = {"agent": agent, "custom_properties": custom_properties, "customizable_properties": customizable_properties, "sense_capability": sense_capability, "mandatory_properties": { "name": name, "is_movable": is_movable, "is_traversable": is_traversable, "possible_actions": possible_actions, "is_human_agent": True, "visualize_size": visualize_size, "visualize_shape": visualize_shape, "visualize_colour": visualize_colour, "visualize_opacity": visualize_opacity, "visualize_depth": visualize_depth, "location": location, "team": team} } self.agent_settings.append(hu_ag_setting) def add_area(self, top_left_location, width, height, name, customizable_properties=None, visualize_colour=None, visualize_opacity=None, **custom_properties): # Check if width and height are large enough to make an actual room (with content) if width < 1 or height < 1: raise Exception(f"While adding area {name}; The width {width} and/or height {height} should both be larger" f" than 0.") # Get all locations in the rectangle locs = self.__list_area_locs(top_left_location, width, height) # Add all area objects self.add_multiple_objects(locations=locs, callable_classes=AreaTile, customizable_properties=customizable_properties, visualize_colours=visualize_colour, visualize_opacities=visualize_opacity, **custom_properties) def add_smoke_area(self, top_left_location, width, height, name, visualize_colour=None, smoke_thickness_multiplier=1.0, visualize_depth=None, **custom_properties): # Check if width and height are large enough to make an actual room (with content) if width < 1 or height < 1: raise Exception(f"While adding area {name}; The width {width} and/or height {height} should both be larger" f" than 0.") # Get all locations in the 
rectangle min_x = top_left_location[0] max_x = top_left_location[0] + width min_y = top_left_location[1] max_y = top_left_location[1] + height noise_grid = utils._white_noise(min_x, max_x, min_y, max_y, rng=self.rng) for x in range(noise_grid.shape[0]): for y in range(noise_grid.shape[1]): # get noise point noise = noise_grid[x, y] # convert from [-1,1] range to [0,1] range, and flip opacity = 1 - ((noise + 1.0) / 2.0) opacity = np.clip(opacity * smoke_thickness_multiplier, 0, 1) # add the smokeTile self.add_object(location=[x, y], name=name, callable_class=SmokeTile, visualize_colour=visualize_colour, visualize_opacity=opacity, visualize_depth=visualize_depth, **custom_properties) def __list_area_locs(self, top_left_location, width, height): """ Provided an area with the top_left_location, width and height, generate a list containing all coordinates in that area """ # Get all locations in the rectangle locs = [] min_x = top_left_location[0] max_x = top_left_location[0] + width min_y = top_left_location[1] max_y = top_left_location[1] + height for x in range(min_x, max_x): for y in range(min_y, max_y): locs.append((x, y)) return locs def add_line(self, start, end, name, callable_class=None, customizable_properties=None, is_traversable=None, is_movable=None, visualize_size=None, visualize_shape=None, visualize_colour=None, visualize_depth=None, visualize_opacity=None, **custom_properties): # Get the coordinates on the given line line_coords = _get_line_coords(start, end) # Construct the names names = [name for _ in line_coords] # Add the actual properties self.add_multiple_objects(locations=line_coords, names=names, callable_classes=callable_class, custom_properties=custom_properties, customizable_properties=customizable_properties, is_traversable=is_traversable, visualize_sizes=visualize_size, visualize_shapes=visualize_shape, visualize_colours=visualize_colour, visualize_opacities=visualize_opacity, visualize_depths=visualize_depth, is_movable=is_movable) def 
add_room(self, top_left_location, width, height, name, door_locations=None, with_area_tiles=False, doors_open=False, wall_custom_properties=None, wall_customizable_properties=None, area_custom_properties=None, area_customizable_properties=None, area_visualize_colour=None, area_visualize_opacity=None): # Check if width and height are large enough to make an actual room (with content) if width <= 2 or height <= 2: raise Exception(f"While adding room {name}; The width {width} and/or height {height} should both be larger" f" than 2.") # Check if the with_area boolean is True when any area properties are given if with_area_tiles is False and ( area_custom_properties is not None or area_customizable_properties is not None or area_visualize_colour is not None or area_visualize_opacity is not None): warnings.warn(f"While
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Last edited 2021-02-10 Anisotropic polycrystal multiphase field: model problem formulation, adapted from phase_field_composites by <NAME>, https://zenodo.org/record/1188970 This file is part of phase_field_polycrystals based on FEniCS project (https://fenicsproject.org/) phase_field_polycrystals (c) by <NAME>, Ecole des Ponts ParisTech, Laboratoire Navier (ENPC,IFSTTAR,CNRS UMR 8205) & Ecole Polytechnique, Laboratoire de Mécanique des Solides, Institut Polytechnique phase_field_polycrystals is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. You should have received a copy of the license along with this work. If not, see <http://creativecommons.org/licenses/by-sa/4.0/>. """ _author__ = "<NAME>" __license__ = "CC BY-SA 4.0" __email__ = "<EMAIL>" from dolfin import * import matplotlib.pyplot as plt import numpy as np from ufl import replace from .material_models import * from .hybrid_linear_solver import * from time import time from time import sleep import os from .remesher import * from .mesh_converter import * import types import scipy as sp import scipy.ndimage import sys import subprocess #import h5py from mpi4py import MPI as pyMPI from .j_integral import * from .version_date import * ## Setting up damage-part optimization solver class DamageProblem(OptimisationProblem): def __init__(self, Wtot,DW_d,D2W_d,d): OptimisationProblem.__init__(self) self.obj_func = Wtot self.residual = DW_d self.Jac = D2W_d self.var = d self.bcs = [] def f(self, x): self.var.vector()[:] = x return assemble(self.obj_func) def F(self, b, x): self.var.vector()[:] = x assemble(self.residual, b, self.bcs) def J(self, A, x): self.var.vector()[:] = x assemble(self.Jac, A, self.bcs) class FractureProblem: def __init__(self,mesh,facets,mat,prefix,loads=[[0,Constant(0.)]],Jcontours=[],mf=None,mvc=None): self.staggered_solver = dict({"iter_max":500,"tol":1e-4,"accelerated":False,"history_function":False}) 
self.comm = MPI.comm_world self.rank = MPI.rank(self.comm) self.mesh = mesh self.mf = mf self.mvc = mvc self.u_degree = 1 self.d_degree = 1 self.num_vertices = self.mesh.num_vertices() # NOK in parallel, use metric size instead self.facets = facets self.dim = mesh.geometric_dimension() self.mat = mat self.prefix = prefix if (not os.path.isdir(self.prefix)): os.system("mkdir %s" % self.prefix) self.bcs = [] self.bc_d =[] self.Uimp = [Expression("t", t=0, degree=0)] self.incr = 0 self.incr_save = 1 self.t = 0. self.dtime = 1.e-4 self.desired_dincr = 1.e-2 self.max_dincr = 1.e-2 self.min_dtime = 1.e-5 self.max_dtime = 1.e-3 self.final_time = 1.e-2 self.niter_tot = 0 self.niter_TAO = 0 self.niter_iterative = 0 self.niter_direct = 0 self.set_functions() self.dx = Measure("dx") self.ds = Measure("ds") self.loads = loads self.load_boundaries = [self.ds] self.Wext = self.define_external_work() self.resultant = self.sig[1,1]*self.ds self.results = XDMFFile(MPI.comm_world,self.prefix+"output.xdmf") self.results.parameters["flush_output"] = True self.results.parameters["functions_share_mesh"] = True self.J_results = XDMFFile(MPI.comm_world,self.prefix+"J_integral.xdmf") self.J_results.parameters["flush_output"] = True self.J_results.parameters["functions_share_mesh"] = True self.checkpoints = XDMFFile(MPI.comm_world,self.prefix+"checkpoint.xdmf") self.checkpoints.parameters["flush_output"] = True self.checkpoints.parameters["functions_share_mesh"] = True self.save_all = True self.save_intermediate = False self.save_checkpoints = False self.write_checkpoint_count = 0 self.use_hybrid_solver = False self.normal = FacetNormal(mesh) self.dsJ = Jcontours self.J = [] self.remesh = False self.use_remeshing = False self.remeshing_criterion = 1.e-2 self.remeshing_index = 1 self.nbgrains = -1 self.remesher = None self.boundaries = [] self.markers = [] self.domains = [] self.domains_markers = [] self.myBCS = self.BCS(self.Vu,self.Vd,self.facets) self.myResultant = 
self.Resultant(self.mat,self.u,self.d,self.P1pos,self.P2pos,self.P3pos,self.ds) self.gaussian_filter_sigma = 1. self.no_residual_stiffness=[False,0.99] self.JIntegral = None self.timings = True self.null_space_basis = None self.rigid_body_motion=[] class BCS(): def __init__(self, Vu, Vd, facets): self.Vu = Vu self.Vd = Vd self.facets = facets class Resultant(): def __init__(self, mat, u, d, P1pos, P2pos, P3pos, ds): self.mat = mat self.u = u self.d = d self.P1pos = P1pos self.P2pos = P2pos self.P3pos = P3pos self.ds = ds def set_functions(self): # Definition of functions spaces and test/trial functions self.Vu = VectorFunctionSpace(self.mesh, "CG", self.u_degree, dim=self.dim) if self.mat.damage_dim == 1: self.Vd = FunctionSpace(self.mesh, "CG", self.d_degree) else: self.Vd = VectorFunctionSpace(self.mesh, "CG", self.d_degree, dim=self.mat.damage_dim) self.V0 = FunctionSpace(self.mesh, "DG", 0) self.Vsig = TensorFunctionSpace(self.mesh, "CG", self.u_degree, shape=(3,3)) self.VV = VectorFunctionSpace(self.mesh, "DG", 0, dim=3) self.Vr = TensorFunctionSpace(self.mesh, "DG", 0, shape=(3,3)) self.Vmetric = FunctionSpace(self.mesh, "CG", self.d_degree) self.u_ = TestFunction(self.Vu) self.du = TrialFunction(self.Vu) self.d_ = TestFunction(self.Vd) self.dd = TrialFunction(self.Vd) # Definition of functions self.u = Function(self.Vu,name="Displacement") #self.u_prev = Function(self.Vu,name="Previous displacement") self.d = Function(self.Vd,name="Damage") self.d_prev = Function(self.Vd,name="Previous damage") self.d_prev_iter = Function(self.Vd,name="Previous damage in staggered minimization") #self.dold = Function(self.Vd,name="Old damage") self.d_lb = Function(self.Vd,name="Lower bound d_n") self.d_ub = Function(self.Vd,name="Damage upper bound") #"Upper bound 1") self.d_ar= Function(self.Vd,name="Damage field after remeshing") self.d_ub = self.mat.dub self.sig = Function(self.Vsig,name="Stress") self.eel = Function(self.Vsig,name="ElasticStrain") self.epspos = 
Function(self.Vsig,name="Strain (+)") self.epsneg = Function(self.Vsig,name="Strain (-)") #self.V1 = Function(self.VV,name="V1") #self.V2 = Function(self.VV,name="V2") #self.V3 = Function(self.VV,name="V3") # if self.staggered_solver["accelerated"]: # self.tmp_u = Function(self.Vu) # self.tmp_d = Function(self.Vd) self.R = Function(self.Vr,name="Rotation matrix") self.dissipated = Function(self.V0,name="Plastic dissipation") self.stored = Function(self.V0,name="Stored energy") self.P1pos = Function(self.V0,name="P1pos") self.P2pos = Function(self.V0,name="P2pos") self.P3pos = Function(self.V0,name="P3pos") self.P1pos.interpolate(Constant(1.)) self.P2pos.interpolate(Constant(1.)) self.P3pos.interpolate(Constant(1.)) self.Efrac_field = Function(self.V0,name="Efrac") self.d_eq_fiss = Function(self.Vd,name="deq") if self.mat.damage_dim==1: self.d_eq_fiss.interpolate(Constant((1.))) self.d_prev_iter.interpolate(Constant(0.)) else: self.d_eq_fiss.interpolate(Constant((1.,)*self.mat.damage_dim)) self.d_prev_iter.interpolate(Constant((0.,)*self.mat.damage_dim)) #self.Vstiffness = TensorFunctionSpace(self.mesh, "CG", 1, shape=(6,6)) #self.stiffness = Function(self.Vstiffness,name="Stiffness") self.metric = Function(self.Vmetric,name="Remeshing metric") self.metric.interpolate(Constant(0.)) def set_load(self,u): L = self.loads[0][1]*u[self.loads[0][0]]*self.load_boundaries[0] for (load,load_boundary) in list(zip(self.loads[1:],self.load_boundaries[1:])): L += load[1]*u[load[0]]*load_boundary return L def define_external_work(self): return self.set_load(self.u) #return dot(self.load,self.u)*self.ds def set_energies(self): if (not self.mat.behaviour=="linear_elasticity"): self.mb = self.mat.mfront_behaviour.create_material() self.solver_u = mf.MFrontNonlinearProblem(self.u, self.mb, quadrature_degree=1, bcs=self.bcs) self.solver_u.register_external_state_variable("Damage", self.d) ''' prm = self.solver_u.solver.parameters #prm['nonlinear_solver'] = 'newton' 
prm['linear_solver'] = 'gmres' #'mumps' #'minres' #'cg' #'cg' #'mumps' #'gmres' #'petsc' #'umfpack' #'tfqmr' #prm['preconditioner'] = 'petsc_amg' #'ilu' # 'sor' # 'icc' # 'petsc_amg' #prm['krylov_solver']['error_on_nonconvergence'] = True #prm['krylov_solver']['monitor_convergence'] = True #prm['krylov_solver']['absolute_tolerance'] = 1E-14 #prm['krylov_solver']['relative_tolerance'] = 1E-14 #prm['krylov_solver']['maximum_iterations'] = 10000 prm['krylov_solver']['nonzero_initial_guess'] = True prm['preconditioner'] = 'hypre_amg' prm['absolute_tolerance'] = 1E-6 #-9 prm['relative_tolerance'] = 1E-8 #-8 #prm['maximum_iterations'] = 1000 #25 #prm['relaxation_parameter'] = 1. ##prm['krylov_solver']['gmres']['restart'] = 40 ##prm['krylov_solver']['preconditioner']['ilu']['fill_level'] = 0 prm["report"] = True #prm['lu_solver']['symmetric'] = True #False ''' self.solver_u.solver = PETScSNESSolver('newtonls') #'newtontr') #'newtonls') prm = self.solver_u.solver.parameters #prm['nonlinear_solver'] = 'snes' #prm['line_search'] = 'bt' #'cp' #'cp' #'nleqerr' # 'bt' # 'basic' # 'l2' prm['line_search'] = 'nleqerr' #prm['linear_solver'] = 'mumps' prm['linear_solver'] = 'cg' #'gmres' #'cg' #'gmres' prm['preconditioner'] = 'amg' #'hypre_amg' prm['krylov_solver']['nonzero_initial_guess'] = False # True #prm['maximum_iterations'] = 50 prm['absolute_tolerance'] = 1E-5 #prm['relative_tolerance'] = 1E-8 #prm['report'] = False #True self.load = self.set_load(self.u) self.solver_u.set_loading(self.load) self.dissipated.vector().set_local(self.mb.data_manager.s1.dissipated_energies) self.dissipated.vector().apply("insert") self.stored.vector().set_local(self.mb.data_manager.s1.stored_energies) self.stored.vector().apply("insert") #print(max(_dummy_function.vector()[:])) self.sigma() #self.eps_elas() ## self.Wel = 0.5*inner(self.sig,self.eel)*self.dx ## self.Wel = 0.5*(1.-self.d)**2*inner(self.sig,self.eel)*self.dx self.Wel = (1.-self.d)**2*self.stored*self.dx #self.Wel = 
0.5*self.stored*self.dx self.Wdis = (1.-self.d)**2*self.dissipated*self.dx else: # Definition of energy densities # self.Wel = 0.5*inner(self.mat.sigma(self.u,self.d,self.P1pos,self.P2pos),eps(self.u))*self.dx # self.Wel = 0.5*inner(self.mat.sigma(self.u,self.d,self.P1pos,self.P2pos,self.P3pos),eps(self.u,self.dim))*self.dx self.sigma() self.Wel = 0.5*inner(self.sig,eps(self.u,self.dim))*self.dx self.Efrac = self.mat.fracture_energy_density(self.d,self.d_prev_iter) self.Wfrac = sum(self.Efrac)*self.dx self.Wtot = self.Wel + self.Wfrac - self.Wext if (not self.mat.behaviour=="linear_elasticity"): self.Wtot += self.Wdis # Definition of J integral if (self.dim == 2): normal3 = as_vector([self.normal[0],self.normal[1],0.]) sigma_n3 = dot(normal3, self.sig) sigma_n = as_vector([sigma_n3[0],sigma_n3[1]]) elif (self.dim == 3): normal3 = self.normal sigma_n = dot(normal3, self.sig) if (not self.dsJ==[]): self.J=[] for c in self.dsJ: #self.J.append( (0.5*inner(self.sig,eps(self.u,self.dim))*self.normal[1] \ # - inner(sigma_n, grad(self.u)[:,0]) ) * c ) # for outer boundaries self.J.append( (0.5*inner(self.sig,eps(self.u,self.dim))*self.normal[1]) * c ) self.J.append( (- inner(sigma_n, grad(self.u)[:,0]) ) * c ) # for outer boundaries for c in self.dsJ: #self.J.append( (0.5*inner(self.sig,eps(self.u,self.dim))*self.normal[0] \ # - inner(sigma_n, grad(self.u)[:,1]) ) * c ) # for outer boundaries self.J.append( (0.5*inner(self.sig,eps(self.u,self.dim))*self.normal[0] ) * c) self.J.append( (- inner(sigma_n, grad(self.u)[:,1]) ) * c ) # for outer boundaries # self.J.append( (0.5*inner(self.sig,eps(self.u,self.dim))*self.normal[1] \ # - inner(sigma_n, grad(self.u)[:,0]) )('-') * c ) # for inner boundaries # Definition of energy derivatives self.DW_u = derivative(self.Wtot,self.u,self.u_) self.D2W_u = derivative(self.DW_u,self.u,self.du) self.DW_d = derivative(self.Wtot,self.d,self.d_) self.D2W_d = derivative(self.DW_d,self.d,self.dd) def set_problems(self): if not 
self.rigid_body_motion==[]: null_space = [interpolate(n, self.Vu).vector() for n in self.rigid_body_motion] # Make into unit vectors [normalize(n, 'l2') for n in null_space] # Create null space basis object self.null_space_basis = VectorSpaceBasis(null_space) # Setting up displacement-part linear solver # LinearVariationalProblem(lhs(self.D2W_u),replace(self.Wext,{self.u:self.u_}), self.u, self.bcs) if (self.mat.behaviour=='linear_elasticity'): self.load = self.set_load(self.u_) if (self.use_hybrid_solver): #self.solver_u = HybridLinearSolver(lhs(self.D2W_u),dot(self.load,self.u_)*self.ds,\ self.solver_u = HybridLinearSolver(lhs(self.D2W_u),self.load,\ self.u,bcs=self.bcs,parameters={"iteration_switch": 5,\ "user_switch": True},null_space_basis=self.null_space_basis) #not self.remesh or (self.niter>0)}) else: if (not self.mat.tension_compression_asymmetry): #self.problem_u = LinearVariationalProblem(lhs(self.D2W_u),dot(self.load,self.u_)*self.ds,self.u,self.bcs) self.problem_u = LinearVariationalProblem(lhs(self.D2W_u),self.load,self.u,self.bcs) self.solver_u = LinearVariationalSolver(self.problem_u) self.solver_u.parameters["linear_solver"] = "mumps" else: self.problem_u = NonlinearVariationalProblem(self.DW_u,self.u,self.bcs,J=self.D2W_u) self.solver_u = NonlinearVariationalSolver(self.problem_u) prm = self.solver_u.parameters prm['nonlinear_solver'] = 'newton' prm['newton_solver']['linear_solver'] = 'mumps' #'gmres' #'mumps' #'petsc' prm['newton_solver']['error_on_nonconvergence'] = False #True prm['newton_solver']['absolute_tolerance'] = 1E-9 prm['newton_solver']['relative_tolerance'] = 1E-8 prm['newton_solver']['maximum_iterations'] = 25 #10000 #25 prm['newton_solver']['relaxation_parameter'] = 1.0 prm['newton_solver']['lu_solver']['report'] = True #prm['newton_solver']['lu_solver']['reuse_factorization'] = False #prm['newton_solver']['lu_solver']['same_nonzero_pattern'] = False prm['newton_solver']['lu_solver']['symmetric'] = False 
prm['newton_solver']['krylov_solver']['error_on_nonconvergence'] = True prm['newton_solver']['krylov_solver']['absolute_tolerance'] = 1E-7 prm['newton_solver']['krylov_solver']['relative_tolerance'] = 1E-5 prm['newton_solver']['krylov_solver']['maximum_iterations'] = 1000 prm['newton_solver']['krylov_solver']['nonzero_initial_guess'] = True if prm['newton_solver']['linear_solver'] == 'gmres': prm['newton_solver']['preconditioner'] = 'ilu' #self.solver_u.parameters["newton_solver"]["linear_solver"] = "mumps" self.solver_d = PETScTAOSolver() self.solver_d.parameters["method"] = "tron" self.solver_d.parameters["line_search"] = "gpcg" self.solver_d.parameters["linear_solver"] = "cg" #"mumps" #"mumps" #'cg", "gltr", "gmres", "nash" #self.solver_d.parameters["preconditioner"] = "hypre_amg" self.solver_d.parameters["maximum_iterations"] = 5000 # self.solver_d.parameters["gradient_absolute_tol"] = self.staggered_solver["tol"] # self.solver_d.parameters["gradient_relative_tol"] = self.staggered_solver["tol"] # self.solver_d.parameters["gradient_t_tol"] = self.staggered_solver["tol"] self.solver_d.parameters["gradient_absolute_tol"] = 1.e-4 self.solver_d.parameters["gradient_relative_tol"] = 1.e-4 self.solver_d.parameters["gradient_t_tol"] = 1.e-4 #@profile def staggered_solve(self): DeltaE = 1. self.niter = 0 if(self.remesh == False):
0) mid_sparse_xy1 = (mid_sparse_xy0[0] + ori_input_length, mid_sparse_xy0[1] + ori_output_length) mid_times_xy = (mid_width + 1, mid_height / 2) mid_equal_xy = (mid_width + 4, mid_height / 2) right_input_xy0 = (0, 0) right_input_xy1 = (1, min(total_height if total_height % 2 == 1 else total_height - 1, right_input_xy0[1] + input_length)) right_output_xy0 = (0, 0) right_output_xy1 = (1, min(total_height if total_height % 2 == 1 else total_height - 1, right_output_xy0[1] + output_length)) if transposed: # Kernel subindex must be transposed ab_input = "b" ab_output = "c" kernel_subindex = list(itertools.product(range(kernel_size - 1, -1, -1), range(kernel_size))) else: ab_input = "a" ab_output = "b" kernel_subindex = list(itertools.product(range(kernel_size), range(kernel_size - 1, -1, -1))) ab_kernel = "w" input_index = list(itertools.product(range(input_size), range(input_size))) kernel_index = list(itertools.product(range(kernel_size), range(kernel_size))) output_index = list(itertools.product(range(output_size), range(output_size))) input_subindex = list(itertools.product(range(input_size), range(input_size - 1, -1, -1))) output_subindex = list(itertools.product(range(output_size), range(output_size - 1, -1, -1))) kernel_offsets = list(itertools.product(range(output_size - 1, -1, -1), range(output_size))) kernel_offset_y, kernel_offset_x = kernel_offsets[step] def get_sparse_index(): kfv = [] total_input_size_with_dilation = spacing * (input_size - 1) + 1 + 2 * padding ifv = np.zeros((total_input_size_with_dilation,) * 2, dtype=np.uint8) # input flat values has_value_index = list(itertools.product(range(padding, total_input_size_with_dilation - padding, spacing), range(padding, total_input_size_with_dilation - padding, spacing))) for i, (x, y) in enumerate(has_value_index): ifv[x, y] = i + 1 if transposed: for i in range(output_size): for j in range(output_size): patch = ifv[i * stride:i * stride + kernel_size, j * stride:j * stride + kernel_size] indices = 
[ori_output_length - 1 - x for x in patch[patch != 0] - 1] subindex = np.where(patch != 0) kfv.append((([i * output_size + j] * len(indices), indices), (kernel_size - 1 - subindex[0], kernel_size - 1 - subindex[1]))) # ((i, j), kernel_subindex) else: for i in range(output_size): for j in range(output_size): patch = ifv[i * stride:i * stride + kernel_size, j * stride:j * stride + kernel_size] indices = (patch[patch != 0] - 1).tolist() kfv.append(((indices, [ori_output_length - 1 - i * output_size - j] * len(indices)), np.where(patch != 0))) # ((i, j), kernel_subindex) return kfv kernel_flat_values = get_sparse_index() def gen_right_string(ab, max_height, templates, index, color): rectangle_template, grid_template, value_template, dot_template, step_template = templates result = '' length = len(index) if length > max_height: half = (max_height - 1) // 2 # Lower half result += rectangle_template.format(color, 0, 0, 1, half) result += ''.join( grid_template.format(0, i, 1, i + 1) for i in range(half)) result += ''.join( value_template.format(0.5, i + 0.5, "large", ab, x, y) for i, (x, y) in zip(range(half), reversed(index))) # VDots result += dot_template.format(0.5, half + 0.6) # Upper half result += rectangle_template.format(color, 0, half + 1, 1, max_height) result += ''.join( grid_template.format(0, max_height - 1 - i, 1, max_height - 1 - i + 1) for i in range(half)) result += ''.join( value_template.format(0.5, max_height - 1 - i + 0.5, "large", ab, x, y) for i, (x, y) in zip(range(half), index)) if color == 'cyan': # for output if step < half: result += step_template.format(0, max_height - 1 - step, 1, max_height - 1 - step + 1) elif step < length - half: pass else: result += step_template.format(0, length - 1 - step, 1, length - 1 - step + 1) else: max_height = length result += rectangle_template.format(color, 0, 0, 1, max_height) result += ''.join( grid_template.format(0, max_height - 1 - i, 1, max_height - 1 - i + 1) for i in range(max_height)) result += 
''.join( value_template.format(0.5, max_height - 1 - i + 0.5, "large", ab, x, y) for i, (x, y) in zip(range(max_height), index)) if color == 'cyan': result += step_template.format(0, max_height - 1 - step, 1, max_height - 1 - step + 1) return result tex_template = Path('latex_templates/alphabet.txt').read_text() unit_template = '\\draw[draw=base03, fill=blue, thick] ({},{}) rectangle ({},{});' value_template = '\\node (node) at ({},{}) {{\\{} $ {}_{{{}{}}} $}};' highlight_template = '\\draw[fill=base02, opacity=0.4] ({},{}) rectangle ({},{});' right_templates = [ '\\draw[fill={}] ({},{}) rectangle ({},{});', '\\draw[step=10mm, base03, thick] ({},{}) grid ({},{});', value_template, '\\node (node) at ({},{}) {{\\large $ \\vdots $}};', '\draw[fill=base02, opacity=0.4] ({},{}) rectangle ({},{});' ] return tex_template.format(**{ 'TOTAL_HEIGHT': f'{total_height}', 'LEFT_X': f'{left_xy[0]}', 'LEFT_Y': f'{left_xy[1]}', 'LEFT_OUTPUT_X': f'{left_output_xy[0]}', 'LEFT_OUTPUT_Y': f'{left_output_xy[1]}', 'MID_X': f'{mid_xy[0]}', 'MID_Y': f'{mid_xy[1]}', 'RIGHT_X': f'{right_xy[0]}', 'RIGHT_Y': f'{right_xy[1]}', 'RIGHT_INPUT_X': f'{right_input_xy[0]}', 'RIGHT_INPUT_Y': f'{right_input_xy[1]}', 'RIGHT_OUTPUT_X': f'{right_output_xy[0]}', 'RIGHT_OUTPUT_Y': f'{right_output_xy[1]}', 'LEFT_TOTAL_INPUT_XY0': f'{left_total_input_xy0[0]}, {left_total_input_xy0[1]}', 'LEFT_TOTAL_INPUT_XY1': f'{left_total_input_xy1[0]}, {left_total_input_xy1[1]}', 'LEFT_INPUT_UNITS': ''.join( unit_template.format(left_input_xy0[0] + spacing * i, bottom_pad + left_input_xy0[0] + spacing * j, left_input_xy0[0] + spacing * i + 1, bottom_pad + left_input_xy0[0] + spacing * j + 1) for i, j in input_index), 'LEFT_INPUT_VALUES': ''.join( value_template.format(left_input_xy0[0] + spacing * i + 0.4, bottom_pad + left_input_xy0[0] + spacing * j + 0.6, "large", ab_input, x, y) for (i, j), (y, x) in zip(input_index, input_subindex)), 'LEFT_KERNEL_FROM': f'{kernel_offset_x * stride},{kernel_offset_y * stride + 
y_adjustment}', 'LEFT_KERNEL_TO': f'{kernel_offset_x * stride + kernel_size},{kernel_offset_y * stride + kernel_size + y_adjustment}', 'LEFT_KERNEL_VALUES': ''.join( value_template.format(kernel_offset_x * stride + i + 0.75, kernel_offset_y * stride + y_adjustment + j + 0.2, "scriptsize", ab_kernel, x, y) for (i, j), (y, x) in zip(kernel_index, kernel_subindex)), 'LEFT_OUTPUT_XY0': f'{left_output_xy0[0]},{left_output_xy0[1]}', 'LEFT_OUTPUT_XY1': f'{left_output_xy1[0]},{left_output_xy1[1]}', 'LEFT_OUTPUT_STEP_XY0': f'{left_output_xy0[0] + kernel_offset_x},{left_output_xy0[1] + kernel_offset_y}', 'LEFT_OUTPUT_STEP_XY1': f'{left_output_xy0[0] + kernel_offset_x + 1},{left_output_xy0[1] + kernel_offset_y + 1}', 'LEFT_OUTPUT_VALUES': ''.join( value_template.format(left_output_xy0[0] + i + 0.5, left_output_xy0[1] + j + 0.5, "large", ab_output, x, y) for (i, j), (y, x) in zip(output_index, output_subindex)), 'MID_SPARSE_XY0': f'{mid_sparse_xy0[0]},{mid_sparse_xy0[1]}', 'MID_SPARSE_XY1': f'{mid_sparse_xy1[0]},{mid_sparse_xy1[1]}', 'MID_STEP_XY0': f'{step},{0}' if transposed else f'{0},{ori_output_length - 1 - step}', 'MID_STEP_XY1': f'{step + 1},{ori_output_length}' if transposed else f'{ori_input_length},{ori_output_length - step}', 'MID_UNITS': ''.join( highlight_template.format(i, j, i + 1, j + 1) for i, j in zip(*kernel_flat_values[step][0])), 'MID_VALUES': ''.join( value_template.format(i + 0.5, j + 0.5, "large", ab_kernel, kernel_ix, kernel_iy) for line_input_index, line_kernel_index in kernel_flat_values for (i, j), (kernel_ix, kernel_iy) in zip(np.array(line_input_index).transpose(), np.array(line_kernel_index).transpose()) ), 'MID_TIMES_XY': f'{mid_times_xy[0]},{mid_times_xy[1]}', 'MID_EQUAL_XY': f'{mid_equal_xy[0]},{mid_equal_xy[1]}', 'RIGHT_INPUT_STRING': gen_right_string(ab_input, right_input_xy1[1], right_templates, input_index, 'blue'), 'RIGHT_OUTPUT_STRING': gen_right_string(ab_output, right_output_xy1[1], right_templates, output_index, 'cyan'), 
    }).encode("utf-8"), output_size


def compile_figure(which_, name, step, **kwargs):
    # Render one LaTeX figure (arithmetic / numerical / alphabet family) at a
    # given animation step; when --animation is set, render every step and
    # assemble the frames into an optimized gif.
    #
    # Parameters:
    #   which_  -- figure family: 'arithmetic', 'numerical' or 'alphabet'
    #   name    -- base name for the output pdf/png/gif files
    #   step    -- animation step to render first
    #   kwargs  -- forwarded to the make_*_tex_string builder after the
    #              animation/type/quality options are popped off
    #
    # Side effects only (pdflatex / ImageMagick `convert` / gifsicle
    # subprocesses, files under ./out and ./gif); no return value.
    anim = kwargs.pop("animation")
    dtype = kwargs.pop("type")
    quality = kwargs.pop("quality")
    quality_anim = kwargs.pop("quality_animation")
    out_dir = Path(__file__).parent / "out"
    out_dir.mkdir(parents=True, exist_ok=True)

    def cvt_dtype(pdfname):
        # Convert the compiled pdf to png via ImageMagick, then remove the pdf.
        # Animation frames use the (typically lower) animation quality setting.
        if dtype == "png":
            pngname = pdfname.with_suffix(".png")
            subprocess.call(f'convert -density {quality_anim if anim else quality} {pdfname} {pngname}'.split())
            subprocess.call(f'rm {pdfname}'.split())

    def run(step, **kwargs):
        # Build the TeX source for the requested family, compile it with
        # pdflatex (TeX fed via stdin), and convert the result if needed.
        # Returns the builder's output_size (used to count animation frames).
        if which_ == 'arithmetic':
            tex_string, output_size = make_arithmetic_tex_string(step, **kwargs)
        elif which_ == 'numerical':
            tex_string, output_size = make_numerical_tex_string(step, **kwargs)
        elif which_ == 'alphabet':
            tex_string, output_size = make_alphabet_tex_string(step, **kwargs)
        else:
            raise ValueError()
        jobname = '{}_{:02d}'.format(name, step)
        p = subprocess.Popen(['pdflatex', f'-jobname={jobname}', '-output-directory', str(out_dir)],
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdoutdata, _ = p.communicate(input=tex_string)
        # Remove logs and aux if compilation was successfull
        if '! LaTeX Error' in stdoutdata.decode("utf-8") or '! Emergency stop' in stdoutdata.decode("utf-8"):
            print(f'! LaTeX Error: check the log file in {out_dir}/{jobname}.log')
        else:
            subprocess.call(['rm'] + list(out_dir.glob("*.aux")) + list(out_dir.glob("*.log")))
        pdfname = out_dir / (jobname + ".pdf")
        cvt_dtype(pdfname)
        return output_size

    output_size = run(step, **kwargs)
    if anim:
        # Render the remaining output_size**2 steps, then stitch all pngs in
        # ./out into a gif under ./gif and clean the working frames up.
        gif_dir = Path(__file__).parent / "gif"
        gif_dir.mkdir(parents=True, exist_ok=True)
        print("Generating frames...")
        for i in tqdm.tqdm(range(output_size ** 2)):
            if i != step:
                run(i, **kwargs)
        print("Synthesizing gif... (If it takes too much time, try to reduce output quality by add `-qa 150`)")
        input_files = ' '.join([str(x) for x in sorted(out_dir.glob("*.png"))])
        out_file = gif_dir / (name + ".gif")
        subprocess.call(f'convert -delay 100 -loop 0 -layers Optimize +map -background white -alpha remove -alpha off -dispose previous {input_files} {out_file}'.split())
        subprocess.call(f'gifsicle --batch -O3 {out_file}'.split())
        print("Clear working space...")
        subprocess.call(f'rm {" ".join([str(x) for x in out_dir.glob("*.png")])}'.split())
        print("Done.")


if __name__ == "__main__":
    # CLI: one sub-command per figure family; options shared by all families
    # live on a parent parser.
    parser = argparse.ArgumentParser(
        description="Compile a LaTeX figure as part of a convolution animation.")
    subparsers = parser.add_subparsers()
    parent_parser = argparse.ArgumentParser(add_help=False)
    parent_parser.add_argument("name", type=str, help="name for the animation")
    parent_parser.add_argument("-e", "--step", type=int, default=0, help="animation step. (default: %(default)s)")
    parent_parser.add_argument("-i", "--input-size", type=int, default=5, help="input size. (default: %(default)s)")
    parent_parser.add_argument("-o", "--output-size", type=int, default=3, help="output size. (default: %(default)s)")
    parent_parser.add_argument("-p", "--padding", type=int, default=0, help="zero padding. (default: %(default)s)")
    parent_parser.add_argument("-k", "--kernel-size", type=int, default=3, help="kernel size. (default: %(default)s)")
    parent_parser.add_argument("-s", "--stride", type=int, default=1, help="stride. (default: %(default)s)")
    parent_parser.add_argument("-d", "--dilation", type=int, default=1, help="dilation. (default: %(default)s)")
    parent_parser.add_argument("-a", "--animation", action="store_true", help="Make an animation output instead of a single step pdf.")
    parent_parser.add_argument("-y", "--type", type=str, default="png", choices=["pdf", "png"], help="Output type of a single frame. 
(default: %(default)s)") parent_parser.add_argument("-q", "--quality", type=int, default=600, help="Quality of the frame. Larger is better.
superclass = None def __init__(self, sphere_radius=None, central_meridian=None, false_easting=None, false_northing=None): self.sphere_radius = sphere_radius self.central_meridian = central_meridian self.false_easting = false_easting self.false_northing = false_northing def factory(*args_, **kwargs_): if sin_proj_params.subclass: return sin_proj_params.subclass(*args_, **kwargs_) else: return sin_proj_params(*args_, **kwargs_) factory = staticmethod(factory) def get_sphere_radius(self): return self.sphere_radius def set_sphere_radius(self, sphere_radius): self.sphere_radius = sphere_radius def get_central_meridian(self): return self.central_meridian def set_central_meridian(self, central_meridian): self.central_meridian = central_meridian def get_false_easting(self): return self.false_easting def set_false_easting(self, false_easting): self.false_easting = false_easting def get_false_northing(self): return self.false_northing def set_false_northing(self, false_northing): self.false_northing = false_northing def hasContent_(self): if ( self.sphere_radius is not None or self.central_meridian is not None or self.false_easting is not None or self.false_northing is not None ): return True else: return False def export(self, outfile, level, namespace_='', name_='sin_proj_params', namespacedef_='', pretty_print=True): # Check if we are at the root level and output the XML header if level == 0: outfile.write('<?xml version="1.0"?>\n') outfile.write('\n') if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(outfile, level, pretty_print) # Check if we are at the root level and output attributes first before namespacedef if level == 0: outfile.write('<%s%s' % (namespace_, name_)) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='sin_proj_params') outfile.write('%s' % (namespacedef_ and ' ' + namespacedef_ or '')) else: outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) 
already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='sin_proj_params') if self.hasContent_(): outfile.write('>%s' % (eol_, )) self.exportChildren(outfile, level + 1, namespace_='', name_='sin_proj_params', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('</%s%s>%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='sin_proj_params'): pass def exportChildren(self, outfile, level, namespace_='', name_='sin_proj_params', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.sphere_radius is not None: showIndent(outfile, level, pretty_print) outfile.write('<%ssphere_radius>%s</%ssphere_radius>%s' % (namespace_, self.gds_format_double(self.sphere_radius, input_name='sphere_radius'), namespace_, eol_)) if self.central_meridian is not None: showIndent(outfile, level, pretty_print) outfile.write('<%scentral_meridian>%s</%scentral_meridian>%s' % (namespace_, self.gds_format_float(self.central_meridian, input_name='central_meridian'), namespace_, eol_)) if self.false_easting is not None: showIndent(outfile, level, pretty_print) outfile.write('<%sfalse_easting>%s</%sfalse_easting>%s' % (namespace_, self.gds_format_double(self.false_easting, input_name='false_easting'), namespace_, eol_)) if self.false_northing is not None: showIndent(outfile, level, pretty_print) outfile.write('<%sfalse_northing>%s</%sfalse_northing>%s' % (namespace_, self.gds_format_double(self.false_northing, input_name='false_northing'), namespace_, eol_)) def exportLiteral(self, outfile, level, name_='sin_proj_params'): level += 1 already_processed = set() self.exportLiteralAttributes(outfile, level, already_processed, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, already_processed, name_): pass def 
exportLiteralChildren(self, outfile, level, name_): if self.sphere_radius is not None: showIndent(outfile, level) outfile.write('sphere_radius=%e,\n' % self.sphere_radius) if self.central_meridian is not None: showIndent(outfile, level) outfile.write('central_meridian=%f,\n' % self.central_meridian) if self.false_easting is not None: showIndent(outfile, level) outfile.write('false_easting=%e,\n' % self.false_easting) if self.false_northing is not None: showIndent(outfile, level) outfile.write('false_northing=%e,\n' % self.false_northing) def build(self, node): already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'sphere_radius': sval_ = child_.text try: fval_ = float(sval_) except (TypeError, ValueError), exp: raise_parse_error(child_, 'requires float or double: %s' % exp) fval_ = self.gds_validate_float(fval_, node, 'sphere_radius') self.sphere_radius = fval_ elif nodeName_ == 'central_meridian': sval_ = child_.text try: fval_ = float(sval_) except (TypeError, ValueError), exp: raise_parse_error(child_, 'requires float or double: %s' % exp) fval_ = self.gds_validate_float(fval_, node, 'central_meridian') self.central_meridian = fval_ elif nodeName_ == 'false_easting': sval_ = child_.text try: fval_ = float(sval_) except (TypeError, ValueError), exp: raise_parse_error(child_, 'requires float or double: %s' % exp) fval_ = self.gds_validate_float(fval_, node, 'false_easting') self.false_easting = fval_ elif nodeName_ == 'false_northing': sval_ = child_.text try: fval_ = float(sval_) except (TypeError, ValueError), exp: raise_parse_error(child_, 'requires float or double: %s' % exp) fval_ = self.gds_validate_float(fval_, node, 'false_northing') 
self.false_northing = fval_ # end class sin_proj_params class projection_information(GeneratedsSuper): subclass = None superclass = None def __init__(self, projection=None, datum=None, units=None, corner_point=None, grid_origin=None, utm_proj_params=None, ps_proj_params=None, albers_proj_params=None, sin_proj_params=None): self.projection = _cast(None, projection) self.datum = _cast(None, datum) self.units = _cast(None, units) if corner_point is None: self.corner_point = [] else: self.corner_point = corner_point self.grid_origin = grid_origin self.utm_proj_params = utm_proj_params self.ps_proj_params = ps_proj_params self.albers_proj_params = albers_proj_params self.sin_proj_params = sin_proj_params def factory(*args_, **kwargs_): if projection_information.subclass: return projection_information.subclass(*args_, **kwargs_) else: return projection_information(*args_, **kwargs_) factory = staticmethod(factory) def get_corner_point(self): return self.corner_point def set_corner_point(self, corner_point): self.corner_point = corner_point def add_corner_point(self, value): self.corner_point.append(value) def insert_corner_point(self, index, value): self.corner_point[index] = value def get_grid_origin(self): return self.grid_origin def set_grid_origin(self, grid_origin): self.grid_origin = grid_origin def get_utm_proj_params(self): return self.utm_proj_params def set_utm_proj_params(self, utm_proj_params): self.utm_proj_params = utm_proj_params def get_ps_proj_params(self): return self.ps_proj_params def set_ps_proj_params(self, ps_proj_params): self.ps_proj_params = ps_proj_params def get_albers_proj_params(self): return self.albers_proj_params def set_albers_proj_params(self, albers_proj_params): self.albers_proj_params = albers_proj_params def get_sin_proj_params(self): return self.sin_proj_params def set_sin_proj_params(self, sin_proj_params): self.sin_proj_params = sin_proj_params def get_projection(self): return self.projection def set_projection(self, projection): 
self.projection = projection def get_datum(self): return self.datum def set_datum(self, datum): self.datum = datum def get_units(self): return self.units def set_units(self, units): self.units = units def validate_projectionType(self, value): # Validate type projectionType, a restriction on xs:string. pass def validate_datumType(self, value): # Validate type datumType, a restriction on xs:string. pass def validate_projectionUnitsType(self, value): # Validate type projectionUnitsType, a restriction on xs:string. pass def hasContent_(self): if ( self.corner_point or self.grid_origin is not None or self.utm_proj_params is not None or self.ps_proj_params is not None or self.albers_proj_params is not None or self.sin_proj_params is not None ): return True else: return False def export(self, outfile, level, namespace_='', name_='projection_information', namespacedef_='', pretty_print=True): # Check if we are at the root level and output the XML header if level == 0: outfile.write('<?xml version="1.0"?>\n') outfile.write('\n') if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(outfile, level, pretty_print) # Check if we are at the root level and output attributes first before namespacedef if level == 0: outfile.write('<%s%s' % (namespace_, name_)) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='projection_information') outfile.write('%s' % (namespacedef_ and ' ' + namespacedef_ or '')) else: outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='projection_information') if self.hasContent_(): outfile.write('>%s' % (eol_, )) self.exportChildren(outfile, level + 1, namespace_='', name_='projection_information', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('</%s%s>%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def 
exportAttributes(self, outfile, level, already_processed, namespace_='', name_='projection_information'): if self.projection is not None and 'projection' not in already_processed: already_processed.add('projection') outfile.write(' projection=%s' % (quote_attrib(self.projection), )) if self.datum is not None and 'datum' not in already_processed: already_processed.add('datum') outfile.write(' datum=%s' % (quote_attrib(self.datum), )) if self.units is not None and 'units' not in already_processed: already_processed.add('units') outfile.write(' units=%s' % (quote_attrib(self.units), )) def exportChildren(self, outfile, level, namespace_='', name_='projection_information', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' for corner_point_ in self.corner_point: corner_point_.export(outfile, level, namespace_, name_='corner_point', pretty_print=pretty_print) if self.grid_origin is not None: showIndent(outfile, level, pretty_print) outfile.write('<%sgrid_origin>%s</%sgrid_origin>%s' % (namespace_, self.gds_format_string(quote_xml(self.grid_origin).encode(ExternalEncoding), input_name='grid_origin'), namespace_, eol_)) if self.utm_proj_params is not None: self.utm_proj_params.export(outfile, level, namespace_, name_='utm_proj_params', pretty_print=pretty_print) if self.ps_proj_params is not None: self.ps_proj_params.export(outfile, level, namespace_, name_='ps_proj_params', pretty_print=pretty_print) if self.albers_proj_params is not None: self.albers_proj_params.export(outfile, level, namespace_, name_='albers_proj_params', pretty_print=pretty_print) if self.sin_proj_params is not None: self.sin_proj_params.export(outfile, level, namespace_, name_='sin_proj_params', pretty_print=pretty_print) def exportLiteral(self, outfile, level, name_='projection_information'): level += 1 already_processed = set() self.exportLiteralAttributes(outfile, level, already_processed, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, 
name_) def exportLiteralAttributes(self, outfile, level, already_processed, name_): if self.projection is not None and 'projection' not in already_processed: already_processed.add('projection') showIndent(outfile, level) outfile.write('projection="%s",\n' % (self.projection,)) if self.datum is not None and 'datum' not in already_processed: already_processed.add('datum') showIndent(outfile, level) outfile.write('datum="%s",\n' % (self.datum,)) if self.units is not None and 'units' not in already_processed: already_processed.add('units') showIndent(outfile, level) outfile.write('units="%s",\n' % (self.units,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('corner_point=[\n') level += 1 for corner_point_ in self.corner_point: showIndent(outfile, level) outfile.write('model_.corner_point(\n') corner_point_.exportLiteral(outfile, level) showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.grid_origin is not None: showIndent(outfile, level) outfile.write('grid_origin=%s,\n' % quote_python(self.grid_origin).encode(ExternalEncoding)) if self.utm_proj_params is not None: showIndent(outfile, level) outfile.write('utm_proj_params=model_.utm_proj_params(\n') self.utm_proj_params.exportLiteral(outfile, level) showIndent(outfile, level) outfile.write('),\n') if self.ps_proj_params is not None: showIndent(outfile, level) outfile.write('ps_proj_params=model_.ps_proj_params(\n') self.ps_proj_params.exportLiteral(outfile, level) showIndent(outfile, level) outfile.write('),\n') if self.albers_proj_params is not None: showIndent(outfile, level) outfile.write('albers_proj_params=model_.albers_proj_params(\n') self.albers_proj_params.exportLiteral(outfile, level) showIndent(outfile, level) outfile.write('),\n') if self.sin_proj_params is not None: showIndent(outfile, level) outfile.write('sin_proj_params=model_.sin_proj_params(\n') 
self.sin_proj_params.exportLiteral(outfile, level) showIndent(outfile, level) outfile.write('),\n') def build(self, node): already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('projection', node) if value is not None and 'projection' not in already_processed: already_processed.add('projection') self.projection = value self.validate_projectionType(self.projection) # validate type projectionType value = find_attr_value_('datum', node)
'iso-8601'}, } def __init__( self, *, delivery_info: "ExportDeliveryInfo", definition: "ExportDefinition", format: Optional[Union[str, "FormatType"]] = None, run_history: Optional["ExportExecutionListResult"] = None, **kwargs ): super(CommonExportProperties, self).__init__(**kwargs) self.format = format self.delivery_info = delivery_info self.definition = definition self.run_history = run_history self.next_run_time_estimate = None class Dimension(Resource): """Dimension. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id. :vartype id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :ivar tags: A set of tags. Resource tags. :vartype tags: dict[str, str] :ivar description: Dimension description. :vartype description: str :ivar filter_enabled: Filter enabled. :vartype filter_enabled: bool :ivar grouping_enabled: Grouping enabled. :vartype grouping_enabled: bool :param data: :type data: list[str] :ivar total: Total number of data for the dimension. :vartype total: int :ivar category: Dimension category. :vartype category: str :ivar usage_start: Usage start. :vartype usage_start: ~datetime.datetime :ivar usage_end: Usage end. :vartype usage_end: ~datetime.datetime :ivar next_link: The link (url) to the next page of results. 
:vartype next_link: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'tags': {'readonly': True}, 'description': {'readonly': True}, 'filter_enabled': {'readonly': True}, 'grouping_enabled': {'readonly': True}, 'total': {'readonly': True}, 'category': {'readonly': True}, 'usage_start': {'readonly': True}, 'usage_end': {'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'filter_enabled': {'key': 'properties.filterEnabled', 'type': 'bool'}, 'grouping_enabled': {'key': 'properties.groupingEnabled', 'type': 'bool'}, 'data': {'key': 'properties.data', 'type': '[str]'}, 'total': {'key': 'properties.total', 'type': 'int'}, 'category': {'key': 'properties.category', 'type': 'str'}, 'usage_start': {'key': 'properties.usageStart', 'type': 'iso-8601'}, 'usage_end': {'key': 'properties.usageEnd', 'type': 'iso-8601'}, 'next_link': {'key': 'properties.nextLink', 'type': 'str'}, } def __init__( self, *, data: Optional[List[str]] = None, **kwargs ): super(Dimension, self).__init__(**kwargs) self.description = None self.filter_enabled = None self.grouping_enabled = None self.data = data self.total = None self.category = None self.usage_start = None self.usage_end = None self.next_link = None class DimensionsListResult(msrest.serialization.Model): """Result of listing dimensions. It contains a list of available dimensions. Variables are only populated by the server, and will be ignored when sending a request. :ivar value: The list of dimensions. 
:vartype value: list[~azure.mgmt.costmanagement.models.Dimension] """ _validation = { 'value': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[Dimension]'}, } def __init__( self, **kwargs ): super(DimensionsListResult, self).__init__(**kwargs) self.value = None class DismissAlertPayload(msrest.serialization.Model): """The request payload to update an alert. :param definition: defines the type of alert. :type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition :param description: Alert description. :type description: str :param source: Source of alert. Possible values include: "Preset", "User". :type source: str or ~azure.mgmt.costmanagement.models.AlertSource :param details: Alert details. :type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails :param cost_entity_id: related budget. :type cost_entity_id: str :param status: alert status. Possible values include: "None", "Active", "Overridden", "Resolved", "Dismissed". :type status: str or ~azure.mgmt.costmanagement.models.AlertStatus :param creation_time: dateTime in which alert was created. :type creation_time: str :param close_time: dateTime in which alert was closed. :type close_time: str :param modification_time: dateTime in which alert was last modified. :type modification_time: str :param status_modification_user_name: :type status_modification_user_name: str :param status_modification_time: dateTime in which the alert status was last modified. 
:type status_modification_time: str """ _attribute_map = { 'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'source': {'key': 'properties.source', 'type': 'str'}, 'details': {'key': 'properties.details', 'type': 'AlertPropertiesDetails'}, 'cost_entity_id': {'key': 'properties.costEntityId', 'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'str'}, 'creation_time': {'key': 'properties.creationTime', 'type': 'str'}, 'close_time': {'key': 'properties.closeTime', 'type': 'str'}, 'modification_time': {'key': 'properties.modificationTime', 'type': 'str'}, 'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type': 'str'}, 'status_modification_time': {'key': 'properties.statusModificationTime', 'type': 'str'}, } def __init__( self, *, definition: Optional["AlertPropertiesDefinition"] = None, description: Optional[str] = None, source: Optional[Union[str, "AlertSource"]] = None, details: Optional["AlertPropertiesDetails"] = None, cost_entity_id: Optional[str] = None, status: Optional[Union[str, "AlertStatus"]] = None, creation_time: Optional[str] = None, close_time: Optional[str] = None, modification_time: Optional[str] = None, status_modification_user_name: Optional[str] = None, status_modification_time: Optional[str] = None, **kwargs ): super(DismissAlertPayload, self).__init__(**kwargs) self.definition = definition self.description = description self.source = source self.details = details self.cost_entity_id = cost_entity_id self.status = status self.creation_time = creation_time self.close_time = close_time self.modification_time = modification_time self.status_modification_user_name = status_modification_user_name self.status_modification_time = status_modification_time class ErrorDetails(msrest.serialization.Model): """The details of the error. 
Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Error code.
    :vartype code: str
    :ivar message: Error message indicating why the operation failed.
    :vartype message: str
    """

    # msrest metadata: both fields are server-populated, hence read-only.
    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
    }

    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        # Read-only fields start as None; the msrest deserializer fills them in.
        super(ErrorDetails, self).__init__(**kwargs)
        self.code = None
        self.message = None


class ErrorResponse(msrest.serialization.Model):
    """Error response indicates that the service is not able to process the incoming request.

    The reason is provided in the error message.

    Some Error responses:

    * 429 TooManyRequests - Request is throttled. Retry after waiting for the time specified in
      the "x-ms-ratelimit-microsoft.consumption-retry-after" header.

    * 503 ServiceUnavailable - Service is temporarily unavailable. Retry after waiting for the
      time specified in the "Retry-After" header.

    :param error: The details of the error.
    :type error: ~azure.mgmt.costmanagement.models.ErrorDetails
    """

    _attribute_map = {
        'error': {'key': 'error', 'type': 'ErrorDetails'},
    }

    def __init__(
        self,
        *,
        error: Optional["ErrorDetails"] = None,
        **kwargs
    ):
        super(ErrorResponse, self).__init__(**kwargs)
        self.error = error


class ProxyResource(msrest.serialization.Model):
    """The Resource model definition.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be
     used to determine whether the user is updating the latest version or not.
    :type e_tag: str
    """

    # id/name/type come back from the service; only e_tag is client-settable.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'e_tag': {'key': 'eTag', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        e_tag: Optional[str] = None,
        **kwargs
    ):
        super(ProxyResource, self).__init__(**kwargs)
        self.id = None
        self.name = None
        self.type = None
        self.e_tag = e_tag


class Export(ProxyResource):
    """An export resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be
     used to determine whether the user is updating the latest version or not.
    :type e_tag: str
    :param format: The format of the export being delivered. Currently only 'Csv' is supported.
     Possible values include: "Csv".
    :type format: str or ~azure.mgmt.costmanagement.models.FormatType
    :param delivery_info: Has delivery information for the export.
    :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo
    :param definition: Has the definition for the export.
    :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition
    :param run_history: If requested, has the most recent execution history for the export.
    :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult
    :ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of
     the next execution time.
    :vartype next_run_time_estimate: ~datetime.datetime
    :param schedule: Has schedule information for the export.
    :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'next_run_time_estimate': {'readonly': True},
    }

    # 'properties.*' keys flatten the nested JSON "properties" object onto
    # top-level Python attributes during (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'e_tag': {'key': 'eTag', 'type': 'str'},
        'format': {'key': 'properties.format', 'type': 'str'},
        'delivery_info': {'key': 'properties.deliveryInfo', 'type': 'ExportDeliveryInfo'},
        'definition': {'key': 'properties.definition', 'type': 'ExportDefinition'},
        'run_history': {'key': 'properties.runHistory', 'type': 'ExportExecutionListResult'},
        'next_run_time_estimate': {'key': 'properties.nextRunTimeEstimate', 'type': 'iso-8601'},
        'schedule': {'key': 'properties.schedule', 'type': 'ExportSchedule'},
    }

    def __init__(
        self,
        *,
        e_tag: Optional[str] = None,
        format: Optional[Union[str, "FormatType"]] = None,
        delivery_info: Optional["ExportDeliveryInfo"] = None,
        definition: Optional["ExportDefinition"] = None,
        run_history: Optional["ExportExecutionListResult"] = None,
        schedule: Optional["ExportSchedule"] = None,
        **kwargs
    ):
        super(Export, self).__init__(e_tag=e_tag, **kwargs)
        self.format = format
        self.delivery_info = delivery_info
        self.definition = definition
        self.run_history = run_history
        self.next_run_time_estimate = None
        self.schedule = schedule


# NOTE(review): the next class is truncated at the edge of this chunk; the
# remainder of its docstring and body lie outside the visible source.
class ExportDataset(msrest.serialization.Model):
    """The definition for data in the export.

    :param granularity: The granularity of rows in the export. Currently only 'Daily' is
     supported. Possible values include: "Daily".
    :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType
    :param configuration: The export dataset configuration.
    :type
<reponame>kashaiahyah85/MichelAI
# -*- coding: utf-8 -*-
'''
MichelAI, designed by <NAME> for Bend the Future first album
'''
from PyQt5 import QtCore, QtGui, QtWidgets
import os
import random
import sys
import time
import wave
import threading
import cv2
from moviepy.video.io.VideoFileClip import VideoFileClip
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub


class Ui_Dialog(object):
    # pyuic5-style generated UI class: setupUi builds the widget tree,
    # retranslateUi installs the translatable strings.

    def setupUi(self, Dialog):
        """Build all widgets, layouts and signal connections for the dialog.

        Dialog is the QDialog (or QWidget) instance to populate; widgets are
        stored as attributes on self so other methods can reach them.
        """
        Dialog.setObjectName("Dialog")
        Dialog.resize(493, 527)

        # --- Top row: music file name + basic output parameters ------------
        self.verticalLayoutWidget = QtWidgets.QWidget(Dialog)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(9, 9, 471, 101))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label = QtWidgets.QLabel(self.verticalLayoutWidget)
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        self.gFILENAME = QtWidgets.QLineEdit(self.verticalLayoutWidget)
        self.gFILENAME.setText("")
        self.gFILENAME.setObjectName("gFILENAME")
        self.horizontalLayout.addWidget(self.gFILENAME)
        self.verticalLayout.addLayout(self.horizontalLayout)

        # FPS / width / height / theme / quality controls
        self.gridLayout_2 = QtWidgets.QGridLayout()
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.label_2 = QtWidgets.QLabel(self.verticalLayoutWidget)
        self.label_2.setObjectName("label_2")
        self.gridLayout_2.addWidget(self.label_2, 1, 0, 1, 1)
        self.gFPSWANTED = QtWidgets.QSpinBox(self.verticalLayoutWidget)
        self.gFPSWANTED.setMaximumSize(QtCore.QSize(50, 16777215))
        self.gFPSWANTED.setMinimum(5)
        self.gFPSWANTED.setMaximum(1000)
        self.gFPSWANTED.setProperty("value", 30)
        self.gFPSWANTED.setObjectName("gFPSWANTED")
        self.gridLayout_2.addWidget(self.gFPSWANTED, 1, 1, 1, 1)
        self.gWIDTH = QtWidgets.QLineEdit(self.verticalLayoutWidget)
        self.gWIDTH.setMaximumSize(QtCore.QSize(30, 16777215))
        self.gWIDTH.setMaxLength(3750)
        self.gWIDTH.setObjectName("gWIDTH")
        self.gridLayout_2.addWidget(self.gWIDTH, 1, 3, 1, 1)
        # NOTE(review): attribute is spelled gHEIGTH (sic) throughout the file.
        self.gHEIGTH = QtWidgets.QLineEdit(self.verticalLayoutWidget)
        self.gHEIGTH.setMaximumSize(QtCore.QSize(30, 16777215))
        self.gHEIGTH.setMaxLength(3750)
        self.gHEIGTH.setObjectName("gHEIGTH")
        self.gridLayout_2.addWidget(self.gHEIGTH, 1, 5, 1, 1)
        self.label_5 = QtWidgets.QLabel(self.verticalLayoutWidget)
        self.label_5.setObjectName("label_5")
        self.gridLayout_2.addWidget(self.label_5, 1, 4, 1, 1)
        self.label_4 = QtWidgets.QLabel(self.verticalLayoutWidget)
        self.label_4.setObjectName("label_4")
        self.gridLayout_2.addWidget(self.label_4, 1, 2, 1, 1)
        self.gUSETHEME = QtWidgets.QCheckBox(self.verticalLayoutWidget)
        self.gUSETHEME.setObjectName("gUSETHEME")
        self.gridLayout_2.addWidget(self.gUSETHEME, 1, 6, 1, 1)
        self.gQUALITY = QtWidgets.QComboBox(self.verticalLayoutWidget)
        self.gQUALITY.setObjectName("gQUALITY")
        # Item texts (LD/MD/HD) are assigned later in retranslateUi().
        self.gQUALITY.addItem("")
        self.gQUALITY.addItem("")
        self.gQUALITY.addItem("")
        self.gridLayout_2.addWidget(self.gQUALITY, 1, 7, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout_2)

        # Horizontal separator line
        self.line = QtWidgets.QFrame(Dialog)
        self.line.setGeometry(QtCore.QRect(0, 130, 2511, 16))
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")

        # Status text and preview frame
        self.gINFOTEXT = QtWidgets.QLabel(Dialog)
        self.gINFOTEXT.setGeometry(QtCore.QRect(10, 490, 381, 31))
        font = QtGui.QFont()
        font.setPointSize(18)
        self.gINFOTEXT.setFont(font)
        self.gINFOTEXT.setText("")
        self.gINFOTEXT.setObjectName("gINFOTEXT")
        self.gPREVIEW = QtWidgets.QLabel(Dialog)
        self.gPREVIEW.setGeometry(QtCore.QRect(253, 210, 231, 231))
        font = QtGui.QFont()
        font.setPointSize(16)
        self.gPREVIEW.setFont(font)
        self.gPREVIEW.setFrameShape(QtWidgets.QFrame.Box)
        self.gPREVIEW.setObjectName("gPREVIEW")

        # --- Advanced parameters row -------------------------------------
        self.verticalLayoutWidget_2 = QtWidgets.QWidget(Dialog)
        self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(10, 140, 471, 71))
        self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        self.gNBVIDS = QtWidgets.QSpinBox(self.verticalLayoutWidget_2)
        self.gNBVIDS.setMinimum(1)
        self.gNBVIDS.setMaximum(100)
        self.gNBVIDS.setProperty("value", 1)
        self.gNBVIDS.setObjectName("gNBVIDS")
        self.gridLayout.addWidget(self.gNBVIDS, 0, 1, 1, 1)
        self.gSENSIBILITY = QtWidgets.QDoubleSpinBox(self.verticalLayoutWidget_2)
        self.gSENSIBILITY.setMaximum(1.0)
        self.gSENSIBILITY.setSingleStep(0.01)
        self.gSENSIBILITY.setProperty("value", 0.7)
        self.gSENSIBILITY.setObjectName("gSENSIBILITY")
        self.gridLayout.addWidget(self.gSENSIBILITY, 0, 3, 1, 1)
        self.gREALFRAMES = QtWidgets.QSpinBox(self.verticalLayoutWidget_2)
        self.gREALFRAMES.setMinimum(1)
        self.gREALFRAMES.setMaximum(200)
        self.gREALFRAMES.setProperty("value", 5)
        self.gREALFRAMES.setObjectName("gREALFRAMES")
        self.gridLayout.addWidget(self.gREALFRAMES, 0, 5, 1, 1)
        self.label_8 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
        self.label_8.setObjectName("label_8")
        self.gridLayout.addWidget(self.label_8, 0, 4, 1, 1)
        self.label_6 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
        self.label_6.setObjectName("label_6")
        self.gridLayout.addWidget(self.label_6, 0, 0, 1, 1)
        self.label_7 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
        self.label_7.setObjectName("label_7")
        self.gridLayout.addWidget(self.label_7, 0, 2, 1, 1)
        self.gRANDOMIZETHEME = QtWidgets.QCheckBox(self.verticalLayoutWidget_2)
        self.gRANDOMIZETHEME.setObjectName("gRANDOMIZETHEME")
        self.gridLayout.addWidget(self.gRANDOMIZETHEME, 1, 0, 1, 1)
        self.label_9 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
        self.label_9.setObjectName("label_9")
        self.gridLayout.addWidget(self.label_9, 1, 1, 1, 1)
        self.gRANDOMIZETHEMES = QtWidgets.QSpinBox(self.verticalLayoutWidget_2)
        self.gRANDOMIZETHEMES.setMaximum(1000)
        self.gRANDOMIZETHEMES.setProperty("value", 500)
        self.gRANDOMIZETHEMES.setObjectName("gRANDOMIZETHEMES")
        self.gridLayout.addWidget(self.gRANDOMIZETHEMES, 1, 2, 1, 1)
        self.label_10 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
        self.label_10.setObjectName("label_10")
        self.gridLayout.addWidget(self.label_10, 1, 4, 1, 1)
        self.gIMGBUFFERSIZE = QtWidgets.QSpinBox(self.verticalLayoutWidget_2)
        self.gIMGBUFFERSIZE.setMinimum(10)
        self.gIMGBUFFERSIZE.setMaximum(10006)
        self.gIMGBUFFERSIZE.setProperty("value", 100)
        self.gIMGBUFFERSIZE.setObjectName("gIMGBUFFERSIZE")
        self.gridLayout.addWidget(self.gIMGBUFFERSIZE, 1, 5, 1, 1)
        self.verticalLayout_2.addLayout(self.gridLayout)

        # --- FFT / frequency analysis parameters -------------------------
        self.gridLayoutWidget_2 = QtWidgets.QWidget(Dialog)
        self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 210, 241, 126))
        self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
        self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_2)
        self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.label_13 = QtWidgets.QLabel(self.gridLayoutWidget_2)
        self.label_13.setObjectName("label_13")
        self.gridLayout_3.addWidget(self.label_13, 2, 0, 1, 1)
        self.gFREQGROUP = QtWidgets.QSpinBox(self.gridLayoutWidget_2)
        self.gFREQGROUP.setMinimum(1)
        self.gFREQGROUP.setMaximum(10)
        self.gFREQGROUP.setProperty("value", 3)
        self.gFREQGROUP.setObjectName("gFREQGROUP")
        self.gridLayout_3.addWidget(self.gFREQGROUP, 0, 1, 1, 1)
        self.gFLOORFREQARR = QtWidgets.QSpinBox(self.gridLayoutWidget_2)
        self.gFLOORFREQARR.setMaximum(100)
        self.gFLOORFREQARR.setProperty("value", 3)
        self.gFLOORFREQARR.setObjectName("gFLOORFREQARR")
        self.gridLayout_3.addWidget(self.gFLOORFREQARR, 1, 1, 1, 1)
        self.label_11 = QtWidgets.QLabel(self.gridLayoutWidget_2)
        self.label_11.setObjectName("label_11")
        self.gridLayout_3.addWidget(self.label_11, 0, 0, 1, 1)
        self.label_12 = QtWidgets.QLabel(self.gridLayoutWidget_2)
        self.label_12.setObjectName("label_12")
        self.gridLayout_3.addWidget(self.label_12, 1, 0, 1, 1)
        self.label_14 = QtWidgets.QLabel(self.gridLayoutWidget_2)
        self.label_14.setObjectName("label_14")
        self.gridLayout_3.addWidget(self.label_14, 3, 0, 1, 1)
        self.gZMAX = QtWidgets.QDoubleSpinBox(self.gridLayoutWidget_2)
        self.gZMAX.setProperty("value", 2.0)
        self.gZMAX.setObjectName("gZMAX")
        self.gridLayout_3.addWidget(self.gZMAX, 2, 1, 1, 1)
        self.gYMAX = QtWidgets.QDoubleSpinBox(self.gridLayoutWidget_2)
        self.gYMAX.setProperty("value", 1.0)
        self.gYMAX.setObjectName("gYMAX")
        self.gridLayout_3.addWidget(self.gYMAX, 3, 1, 1, 1)
        self.label_15 = QtWidgets.QLabel(self.gridLayoutWidget_2)
        self.label_15.setObjectName("label_15")
        self.gridLayout_3.addWidget(self.label_15, 4, 0, 1, 1)
        self.gFFTDECREASE = QtWidgets.QSpinBox(self.gridLayoutWidget_2)
        self.gFFTDECREASE.setProperty("value", 1)
        self.gFFTDECREASE.setObjectName("gFFTDECREASE")
        self.gridLayout_3.addWidget(self.gFFTDECREASE, 4, 1, 1, 1)

        # --- Bottom row: estimate label, progress bar, start button ------
        self.gESTIMATE = QtWidgets.QLabel(Dialog)
        self.gESTIMATE.setGeometry(QtCore.QRect(20, 490, 391, 21))
        self.gESTIMATE.setObjectName("gESTIMATE")
        self.label_3 = QtWidgets.QLabel(Dialog)
        self.label_3.setGeometry(QtCore.QRect(180, 120, 261, 16))
        self.label_3.setObjectName("label_3")
        self.gProgress = QtWidgets.QProgressBar(Dialog)
        self.gProgress.setGeometry(QtCore.QRect(10, 450, 471, 23))
        self.gProgress.setProperty("value", 0)
        self.gProgress.setObjectName("gProgress")
        self.gStart = QtWidgets.QPushButton(Dialog)
        self.gStart.setGeometry(QtCore.QRect(410, 490, 75, 23))
        self.gStart.setObjectName("gStart")

        self.retranslateUi(Dialog)
        # NOTE(review): self.mainlogic is defined elsewhere in this file
        # (outside the visible chunk); the Start button triggers it.
        self.gStart.clicked.connect(self.mainlogic)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Install the user-visible (translatable) strings on the widgets."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "MichelAI"))
        self.label.setText(_translate("Dialog", "Music file name :"))
self.label_2.setText(_translate("Dialog", "FPS :")) self.gHEIGTH.setText(_translate("Dialog", "1080")) self.label_5.setText(_translate("Dialog", "Height :")) self.label_4.setText(_translate("Dialog", "Width :")) self.gWIDTH.setText(_translate("Dialog", "1920")) self.gUSETHEME.setText(_translate("Dialog", "Use custom theme")) self.gQUALITY.setItemText(0, _translate("Dialog", "LD")) self.gQUALITY.setItemText(1, _translate("Dialog", "MD")) self.gQUALITY.setItemText(2, _translate("Dialog", "HD")) self.label_3.setText(_translate("Dialog", "Advanced parameters")) self.gStart.setText(_translate("Dialog", "Start")) self.gPREVIEW.setText(_translate("Dialog", "")) self.label_8.setText(_translate("Dialog", "Real frames :")) self.label_6.setText(_translate("Dialog", "Number of vids:")) self.label_7.setText(_translate("Dialog", "Sensibility :")) self.gRANDOMIZETHEME.setText(_translate("Dialog", "Randomize theme")) self.label_9.setText(_translate("Dialog", "Randomized themes:")) self.label_10.setText(_translate("Dialog", "Img buffer:")) self.label_13.setText(_translate("Dialog", "Z max :")) self.label_11.setText(_translate("Dialog", "Frequency group :")) self.label_12.setText(_translate("Dialog", "Floor freq array:")) self.label_14.setText(_translate("Dialog", "Y max :")) self.label_15.setText(_translate("Dialog", "FFT Decrease :")) self.gESTIMATE.setText(_translate("Dialog", "ESTIMATE")) def randomindexes(self, themetable, rando, randos): if (themetable == False): themetable = [616, 723, 115, 451, 604, 475, 51, 113, 415, 417, 445, 488, 489, 599, 611, 646, 966, 967, 971, 506, 530, 607, 101, 108, 111, 494, 502, 507, 592, 644, 657, 696, 815, 902, 107, 441, 509, 753, 816, 440, 448, 545, 632, 715, 744, 487, 500, 554, 724, 812, 907, 991, 992, 993, 102, 409, 455, 470, 531, 629, 701, 714, 749, 807, 852, 995, 112, 340, 405, 406, 421, 476, 490, 518, 525, 584, 594, 623, 673, 739, 472, 510, 684, 0, 1, 2, 7, 25, 27, 29, 35, 300, 301, 390, 393, 401, 402, 404, 413, 425, 429, 444, 447, 459, 
464, 471, 508, 526, 527, 528, 583, 600, 605, 624, 633, 643, 655, 725, 737, 818, 821, 949, 950, 951, 970, 972, 973, 974, 975, 976, 979, 980, 984, 486, 517, 535, 557, 577, 826, 948, 365, 437, 439, 512, 552, 574, 576, 590, 635, 677, 712, 719, 720, 825, 840, 881, 910, 938, 947, 977, 978, 70, 73, 88, 516, 533, 551, 661, 674, 703, 854, 879, 936, 72, 90, 366, 418, 426, 438, 450, 461, 484, 503, 513, 515, 529, 532, 541, 563, 582, 596, 619, 625, 638, 640, 662, 726, 736, 849, 873, 900, 968, 969, 21, 24, 28, 33, 34, 37, 71, 148, 388, 392, 453, 454, 473, 562, 606, 668, 695, 716, 747, 776, 783, 787, 845, 850, 855, 878, 908, 942, 76, 78, 89, 419, 427, 442, 514, 613, 626, 639, 663, 688, 780, 784, 828, 839, 844, 872, 875, 889, 904, 917, 941, 75, 77, 281, 296, 385, 387, 400, 452, 544, 546, 567, 572, 579, 588, 612, 622, 667, 722, 940, 65, 74, 295, 297, 304, 386, 449, 534, 540, 547, 556, 559, 685, 728, 837, 901, 911, 994, 618, 659, 5, 9, 31, 36, 43, 47, 149, 150, 221, 228, 290, 291, 294, 302, 308, 319, 320, 321, 339, 349, 350, 352, 353, 359, 364, 368, 382, 389, 391, 403, 420, 443, 460, 492, 553, 558, 571, 587, 589, 593, 595, 597, 598, 608, 614, 620, 627, 658, 664, 699, 704, 713, 721, 748, 755, 877, 954, 955, 957, 958, 963, 987, 989, 990, 69, 360, 435, 483, 538, 550, 679, 687, 708, 863, 899, 903, 52, 53, 55, 56, 63, 64, 79, 87, 92, 97, 110, 122, 130, 303, 305, 306, 307, 399, 407, 412, 432, 467, 469, 477, 491, 521, 524, 537, 543, 591, 617, 621, 656, 686, 700, 705, 732, 759, 857, 862, 892, 895, 920, 91, 121, 505, 682, 710, 898, 82, 93, 118, 127, 145, 160, 333, 408, 457, 458, 463, 474, 478, 497, 504, 511, 519, 520, 523, 536, 539, 542, 548, 586, 602, 615, 628, 689, 718, 772, 778, 801, 833, 846, 861, 876, 883, 888, 893, 924, 934, 945, 960, 985, 986, 39, 40, 45, 46, 48, 57, 80, 94, 98, 106, 109, 117, 119, 129, 244, 270, 271, 283, 288, 289, 327, 328, 332, 347, 358, 397, 398, 411, 422, 423, 424, 428, 430, 436, 446, 456, 462, 465, 522, 601, 609, 610, 649, 653, 702, 706, 707, 709, 727, 729, 730, 
733, 738, 740, 754, 761, 766, 769, 773, 777, 796, 802, 804, 835, 865, 909, 923, 933, 943, 944, 959, 962, 983, 996, 96, 103, 585, 603, 672, 683, 691, 694, 871, 884, 887, 891, 896, 961, 964, 61, 62, 84, 95, 136, 325, 395, 431, 434, 569, 570, 581, 631, 642, 645, 647, 666, 669, 690, 693, 711, 746, 794, 800, 810, 820, 832, 843, 858, 866, 874, 915, 919, 921, 927, 932, 953, 956, 14, 42, 54, 59, 83, 86, 105, 123, 248, 277, 279, 298, 299, 309, 317, 324, 334, 363, 367, 394, 410, 414, 416, 466, 468, 479, 493, 496, 498, 499, 561, 564, 568, 573, 578, 580, 630, 636, 641, 648, 650, 660, 675, 680, 692, 697, 717, 731, 743, 745, 750, 756, 767, 788, 789, 791, 792, 798, 799, 806, 809, 813, 819, 822, 842, 853, 859, 860, 868, 882, 886, 897, 905, 906, 918, 925, 926, 928, 930, 931, 937, 939, 946, 952, 965, 988, 997, 100, 652,
(inv_s, p))

    def test_genpolicy_policygroups_multiple(self):
        '''Test genpolicy (multiple policygroups)'''
        # Create a second policygroup file alongside the fixture's first one.
        test_policygroup2 = "test-policygroup2"
        contents = '''
# %s
#include <abstractions/kde>
#include <abstractions/openssl>
''' % (self.test_policygroup)
        # NOTE(review): file handle is never closed explicitly; relies on
        # CPython refcounting to flush/close.
        open(os.path.join(self.tmpdir, 'policygroups', test_policygroup2), 'w').write(contents)
        groups = "%s,%s" % (self.test_policygroup, test_policygroup2)
        p = self._gen_policy(extra_args=['--policy-groups=%s' % groups])
        # Abstractions from *both* policygroups must appear in the output.
        for s in ['#include <abstractions/nameservice>',
                  '#include <abstractions/gnome>',
                  '#include <abstractions/kde>',
                  '#include <abstractions/openssl>']:
            self.assertTrue(s in p, "Could not find '%s' in:\n%s" % (s, p))
        # The template placeholder must have been substituted away.
        inv_s = '###POLICYGROUPS###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_policygroups_nonexistent(self):
        '''Test genpolicy (nonexistent policygroup)'''
        # Expect AppArmorException; any other exception (or none) is a failure.
        try:
            self._gen_policy(extra_args=['--policy-groups=nonexistent'])
        except easyprof.AppArmorException:
            return
        except Exception:
            raise
        raise Exception ("policygroup should be invalid")

    def test_genpolicy_readpath_file(self):
        '''Test genpolicy (read-path file)'''
        s = "/opt/test-foo"
        p = self._gen_policy(extra_args=['--read-path=%s' % s])
        search = "%s rk," % s
        self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
        inv_s = '###READPATH###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_readpath_home_file(self):
        '''Test genpolicy (read-path file in /home)'''
        # Paths under /home must get the 'owner' modifier.
        s = "/home/*/test-foo"
        p = self._gen_policy(extra_args=['--read-path=%s' % s])
        search = "owner %s rk," % s
        self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
        inv_s = '###READPATH###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_readpath_homevar_file(self):
        '''Test genpolicy (read-path file in @{HOME})'''
        s = "@{HOME}/test-foo"
        p = self._gen_policy(extra_args=['--read-path=%s' % s])
        search = "owner %s rk," % s
        self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
        inv_s = '###READPATH###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_readpath_homedirs_file(self):
        '''Test genpolicy (read-path file in @{HOMEDIRS})'''
        s = "@{HOMEDIRS}/test-foo"
        p = self._gen_policy(extra_args=['--read-path=%s' % s])
        search = "owner %s rk," % s
        self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
        inv_s = '###READPATH###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_readpath_dir(self):
        '''Test genpolicy (read-path directory/)'''
        # A trailing-slash directory expands to the dir itself plus dir/**.
        s = "/opt/test-foo-dir/"
        p = self._gen_policy(extra_args=['--read-path=%s' % s])
        search_terms = ["%s rk," % s, "%s** rk," % s]
        for search in search_terms:
            self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
        inv_s = '###READPATH###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_readpath_dir_glob(self):
        '''Test genpolicy (read-path directory/*)'''
        s = "/opt/test-foo-dir/*"
        p = self._gen_policy(extra_args=['--read-path=%s' % s])
        search_terms = ["%s rk," % os.path.dirname(s), "%s rk," % s]
        for search in search_terms:
            self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
        inv_s = '###READPATH###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_readpath_dir_glob_all(self):
        '''Test genpolicy (read-path directory/**)'''
        s = "/opt/test-foo-dir/**"
        p = self._gen_policy(extra_args=['--read-path=%s' % s])
        search_terms = ["%s rk," % os.path.dirname(s), "%s rk," % s]
        for search in search_terms:
            self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
        inv_s = '###READPATH###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_readpath_multiple(self):
        '''Test genpolicy (read-path multiple)'''
        paths = ["/opt/test-foo",
                 "/home/*/test-foo",
                 "@{HOME}/test-foo",
                 "@{HOMEDIRS}/test-foo",
                 "/opt/test-foo-dir/",
                 "/opt/test-foo-dir/*",
                 "/opt/test-foo-dir/**"]
        args = []
        search_terms = []
        for s in paths:
            args.append('--read-path=%s' % s)
            # This mimics easyprof.gen_path_rule()
            owner = ""
            if s.startswith('/home/') or s.startswith("@{HOME"):
                owner = "owner "
            if s.endswith('/'):
                search_terms.append("%s rk," % (s))
                search_terms.append("%s%s** rk," % (owner, s))
            elif s.endswith('/**') or s.endswith('/*'):
                search_terms.append("%s rk," % (os.path.dirname(s)))
                search_terms.append("%s%s rk," % (owner, s))
            else:
                search_terms.append("%s%s rk," % (owner, s))
        p = self._gen_policy(extra_args=args)
        for search in search_terms:
            self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
        inv_s = '###READPATH###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_readpath_bad(self):
        '''Test genpolicy (read-path bad)'''
        # Relative paths are rejected.
        s = "bar"
        try:
            self._gen_policy(extra_args=['--read-path=%s' % s])
        except easyprof.AppArmorException:
            return
        except Exception:
            raise
        raise Exception ("read-path should be invalid")

    def test_genpolicy_writepath_file(self):
        '''Test genpolicy (write-path file)'''
        s = "/opt/test-foo"
        p = self._gen_policy(extra_args=['--write-path=%s' % s])
        search = "%s rwk," % s
        self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
        inv_s = '###READPATH###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_writepath_home_file(self):
        '''Test genpolicy (write-path file in /home)'''
        s = "/home/*/test-foo"
        p = self._gen_policy(extra_args=['--write-path=%s' % s])
        search = "owner %s rwk," % s
        self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
        inv_s = '###READPATH###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_writepath_homevar_file(self):
        '''Test genpolicy (write-path file in @{HOME})'''
        s = "@{HOME}/test-foo"
        p = self._gen_policy(extra_args=['--write-path=%s' % s])
        search = "owner %s rwk," % s
        self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
        inv_s = '###READPATH###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_writepath_homedirs_file(self):
        '''Test genpolicy (write-path file in @{HOMEDIRS})'''
        s = "@{HOMEDIRS}/test-foo"
        p = self._gen_policy(extra_args=['--write-path=%s' % s])
        search = "owner %s rwk," % s
        self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
        inv_s = '###READPATH###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_writepath_dir(self):
        '''Test genpolicy (write-path directory/)'''
        s = "/opt/test-foo-dir/"
        p = self._gen_policy(extra_args=['--write-path=%s' % s])
        search_terms = ["%s rwk," % s, "%s** rwk," % s]
        for search in search_terms:
            self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
        inv_s = '###READPATH###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_writepath_dir_glob(self):
        '''Test genpolicy (write-path directory/*)'''
        s = "/opt/test-foo-dir/*"
        p = self._gen_policy(extra_args=['--write-path=%s' % s])
        search_terms = ["%s rwk," % os.path.dirname(s), "%s rwk," % s]
        for search in search_terms:
            self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
        inv_s = '###READPATH###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_writepath_dir_glob_all(self):
        '''Test genpolicy (write-path directory/**)'''
        s = "/opt/test-foo-dir/**"
        p = self._gen_policy(extra_args=['--write-path=%s' % s])
        search_terms = ["%s rwk," % os.path.dirname(s), "%s rwk," % s]
        for search in search_terms:
            self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
        inv_s = '###READPATH###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_writepath_multiple(self):
        '''Test genpolicy (write-path multiple)'''
        paths = ["/opt/test-foo",
                 "/home/*/test-foo",
                 "@{HOME}/test-foo",
                 "@{HOMEDIRS}/test-foo",
                 "/opt/test-foo-dir/",
                 "/opt/test-foo-dir/*",
                 "/opt/test-foo-dir/**"]
        args = []
        search_terms = []
        for s in paths:
            args.append('--write-path=%s' % s)
            # This mimics easyprof.gen_path_rule()
            owner = ""
            if s.startswith('/home/') or s.startswith("@{HOME"):
                owner = "owner "
            if s.endswith('/'):
                search_terms.append("%s rwk," % (s))
                search_terms.append("%s%s** rwk," % (owner, s))
            elif s.endswith('/**') or s.endswith('/*'):
                search_terms.append("%s rwk," % (os.path.dirname(s)))
                search_terms.append("%s%s rwk," % (owner, s))
            else:
                search_terms.append("%s%s rwk," % (owner, s))
        p = self._gen_policy(extra_args=args)
        for search in search_terms:
            self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
        inv_s = '###READPATH###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_writepath_bad(self):
        '''Test genpolicy (write-path bad)'''
        s = "bar"
        try:
            self._gen_policy(extra_args=['--write-path=%s' % s])
        except easyprof.AppArmorException:
            return
        except Exception:
            raise
        raise Exception ("write-path should be invalid")

    def test_genpolicy_templatevar(self):
        '''Test genpolicy (template-var single)'''
        s = "@{FOO}=bar"
        p = self._gen_policy(extra_args=['--template-var=%s' % s])
        # The generated policy should contain @{FOO}="bar".
        k, v = s.split('=')
        s = '%s="%s"' % (k, v)
        self.assertTrue(s in p, "Could not find '%s' in:\n%s" % (s, p))
        inv_s = '###TEMPLATEVAR###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_templatevar_multiple(self):
        '''Test genpolicy (template-var multiple)'''
        variables = ['@{FOO}=bar', '@{BAR}=baz']
        args = []
        for s in variables:
            args.append('--template-var=%s' % s)
        p = self._gen_policy(extra_args=args)
        for s in variables:
            k, v = s.split('=')
            s = '%s="%s"' % (k, v)
            self.assertTrue(s in p, "Could not find '%s' in:\n%s" % (s, p))
        inv_s = '###TEMPLATEVAR###'
        self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    def test_genpolicy_templatevar_bad(self):
        '''Test genpolicy (template-var - bad values)'''
        # Each malformed or dangerous value must raise AppArmorException.
        bad = [
            "{FOO}=bar",
            "@FOO}=bar",
            "@{FOO=bar",
            "FOO=bar",
            "@FOO=bar",
            "@{FOO}=/../../../etc/passwd",
            "@{FOO}=bar=foo",
            "@{FOO;BAZ}=bar",
            '@{FOO}=bar"baz',
        ]
        for s in bad:
            try:
                self._gen_policy(extra_args=['--template-var=%s' % s])
            except easyprof.AppArmorException:
                continue
            except Exception:
                raise
            raise Exception ("template-var should be invalid")

    def test_genpolicy_invalid_template_policy(self):
        '''Test genpolicy (invalid template policy)'''
        # create a new template
        template = os.path.join(self.tmpdir, "test-invalid-template")
        shutil.copy(os.path.join(self.tmpdir, 'templates', self.test_template), template)
        contents = open(template).read()

        # Corrupt the template: replace every line containing '}' so the
        # resulting profile is syntactically invalid.
        bad_pol = ""
        bad_string = "bzzzt"
        for line in contents.splitlines():
            if '}' in line:
                bad_pol += bad_string
            else:
                bad_pol += line
            bad_pol += "\n"
        open(template, 'w').write(bad_pol)
        try:
            self._gen_policy(template=template)
        except easyprof.AppArmorException:
            return
        except Exception:
            raise
        raise Exception ("policy should be invalid")

    def test_genpolicy_no_binary_without_profile_name(self):
        '''Test genpolicy (no binary with no profile name)'''
        try:
            easyprof.gen_policy_params(None, self.options)
        except easyprof.AppArmorException:
            return
        except Exception:
            raise
        raise Exception ("No binary or profile name should have been invalid")

    def test_genpolicy_with_binary_with_profile_name(self):
        '''Test genpolicy (binary
<gh_stars>1-10
import random
import glob
import re
import string
from matplotlib import pyplot as plt
import matplotlib
import matplotlib.animation
from .utils import *
from .transform_data import *
from .parse_files import *
import seaborn as sns


def heat_map(grid, name, **kwargs):
    """
    Generic function for making a heat map based on the values in a grid.

    Arguments: grid - the grid of numbers or binary strings to be visualized.
               name - string indicating what the file storing the image
                      should be called.

    kwargs:
       palette - a seaborn palette (list of RGB values) indicating
                 how to color values. Will be converted to a continuous
                 colormap if necessary
       denom - the maximum value of numbers in the grid (only used if the grid
               actually contains numbers). This is used to normalize values
               and use the full dynamic range of the color pallete.
    """
    # get_kwargs / color_grid / make_imshow_plot come from the star-imported
    # project helpers above.
    denom, palette = get_kwargs(grid, kwargs)
    if "mask_zeros" in kwargs:
        mask_zeros = kwargs["mask_zeros"]
    else:
        mask_zeros = False

    grid = color_grid(grid, palette, denom, mask_zeros)
    make_imshow_plot(grid, name)


def paired_environment_phenotype_movie(environment, phenotypes, **kwargs):
    """
    Makes an animation overlaying colored circles representing phenotypes over
    an imshow() plot indicating the resources present in each cell. By default,
    color is determined using the palettes in the EnvironmentFile object
    passed as the first parameter. The easiest way to change color palettes
    is to assign new palettes to environment.task_palette and
    environment.resource_palette before calling this function.

    If either the environment or phenotypes grids contain integers greater
    than 1, you should pass a `denom` keyword argument indicating how to
    normalize them. Using differnet denoms for the environment and phenotypes
    is not currently supported (if you need to, you should probably just
    divide everything by the appropraite denoms before passing them to this
    funciton).

    Inputs:
          environment - an EnvironmentFile object indicatng the distribution
                        of resources and the appropriate palettes to use.
          phenotypes - a 2d array of numbers or binary strings representing
                       the placement of phenotypes across the environment

          kwargs:
             denom - an integer indicating how to normalize numbers in the
                     environment and phenotype grids if neccesary.

    Outputs:
         Returns a matplotlib animation object.
         Saves animation in the file:
             [environment_file_identifier]_phenotype_overlay.mp4
    """
    denom, palette = get_kwargs(environment, kwargs)

    # Create figure to do plotting
    fig = plt.figure(figsize=(20, 20))

    # Create list of circles at every place in environment
    patches = []
    for i in range(len(phenotypes)):
        for j in range(len(phenotypes[i])):
            patches.append(plt.Circle((j, i), radius=.3, lw=2, ec="black",
                                      facecolor=None, zorder=2))

    # This will be called to color niches, which are always in background
    def init():
        plot_world(environment, palette=environment.resource_palette,
                   denom=denom)
        for p in patches:
            fig.gca().add_patch(p)
        return patches

    # Change colors of circles as appropriate for new time step
    def animate(n):
        # Take the nth 2D slice of the 3D phenotype history.
        phen_grid = slice_3d_grid(phenotypes, n)
        # Recolor circles
        plot_phens_blits(phen_grid, patches,
                         palette=environment.task_palette, denom=denom)
        return patches

    # Do actual animation
    anim = matplotlib.animation.FuncAnimation(
        fig, animate, init_func=init, frames=len(phenotypes[0][0]),
        blit=True, interval=750)

    print(environment.name)
    # print(anim.to_html5_video())
    # NOTE(review): docstring says .mp4 but the file saved is a .mov written
    # with ImageMagickWriter — confirm which output is intended.
    anim.save(environment.name + "_phenotype_overlay.mov",
              writer=matplotlib.animation.ImageMagickWriter())
    return anim


def make_movie(phenotypes, **kwargs):
    """
    Makes an animation of a 3d grid of phenotypes over time, drawing each
    time-slice with imshow().

    Inputs:
          phenotypes - a 3d array of numbers or binary strings representing
                       the placement of phenotypes across the grid over time

          kwargs:
             denom - an integer indicating how to normalize numbers in the
                     phenotype grid if neccesary.
             palette - a seaborn palette (list of RGB values) indicating how
                     to color values.

    Outputs:
         Returns a matplotlib animation object.
         Saves animation in the file: movie.mov
    """
    denom, palette = get_kwargs(phenotypes, kwargs)
    # Create figure to do plotting
    fig = plt.figure(figsize=(20, 20))

    # Change colors of circles as appropriate for new time step
    def animate(n):
        # print("animating", n)
        phen_grid = slice_3d_grid(phenotypes, n)
        # print(phen_grid)
        grid = color_grid(phen_grid, palette, denom, False)
        # NOTE(review): string values ("off") for these boolean tick_params
        # arguments are deprecated in modern matplotlib — confirm the
        # matplotlib version this targets.
        plt.tick_params(labelbottom="off", labeltop="off", labelleft="off",
                        labelright="off", bottom="off", top="off",
                        left="off", right="off")
        plt.imshow(grid, interpolation="nearest", aspect=1, zorder=1)
        plt.tight_layout()

    # Do actual animation
    anim = matplotlib.animation.FuncAnimation(
        fig, animate, frames=len(phenotypes[0][0]), interval=750)

    anim.save("movie.mov")
    return anim


def plot_phens(phen_grid, **kwargs):
    """
    Plots circles colored according to the values in phen_grid.

    -1 serves as a sentinel value, indicating that a circle should not be
    plotted in that location.
    """
    denom, palette = get_kwargs(phen_grid, kwargs, True)

    grid = color_grid(phen_grid, palette, denom)

    for i in range(len(grid)):
        for j in range(len(grid[i])):
            # NOTE(review): the second test is always True for tuple values
            # (a tuple never equals -1); the first comparison already filters
            # the -1 sentinel. Left as-is pending confirmation.
            if grid[i][j] != -1 and tuple(grid[i][j]) != -1:
                plt.gca().add_patch(plt.Circle((j, i), radius=.3, lw=1,
                                               ec="black",
                                               facecolor=grid[i][j],
                                               zorder=2))


def plot_phens_circles(phen_grid, **kwargs):
    """
    Plots phenotypes represented as concentric circles. Each circle
    represents one task that the phenotype can perform, with larger circles
    representing more complex tasks.

    Arguments: phen_grid - a 2D array of strings representing binary numbers

    kwargs:
       palette - a seaborn palette (list of RGB values) indicating
                 how to color values. Will be converted to a continuous
                 colormap if necessary
       denom - the maximum value of numbers in the grid (only used if the grid
               actually contains numbers). This is used to normalize values
               and use the full dynamic range of the color pallete.

    TODO: come up with way to represent organisms that don't do any tasks.
    """
    denom, palette = get_kwargs(phen_grid, kwargs, True)
    n_tasks = len(palette)
    grid = phen_grid
    for i in range(len(grid)):
        for j in range(len(grid[i])):
            if grid[i][j] != -1 and int(grid[i][j], 2) != -1 and \
                    int(grid[i][j], 2) != 0:
                first = True
                # Strip the "0b" prefix; each remaining bit is one task.
                b_ind = grid[i][j].find("b")
                phen = grid[i][j][b_ind+1:]
                for k in range(len(phen)):
                    if int(phen[k]) == 1:
                        # Outermost (largest) circle = most complex task;
                        # only the outermost ring gets an outline.
                        plt.gca().add_patch(
                            plt.Circle(
                                (j, i), radius=(n_tasks - k)*.05,
                                lw=.1 if first else 0, ec="black",
                                facecolor=palette[k], zorder=2+k))
                        first = False
            elif int(grid[i][j], 2) == 0:
                # Phenotype performs no tasks: draw a single grey disc.
                plt.gca().add_patch(
                    plt.Circle(
                        (j, i), radius=(n_tasks)*.05, lw=.1,
                        ec="black", facecolor="grey", zorder=2))


def plot_phens_blits(phen_grid, patches, **kwargs):
    """
    A version of plot_phens designed to be used in animations. Takes a 2D
    array of phenotypes and a list of matplotlib patch objects that have
    already been added to the current axes and recolors the patches based
    on the array.
    """
    denom, palette = get_kwargs(phen_grid, kwargs)

    grid = color_grid(phen_grid, palette, denom)

    for i in range(len(grid)):
        for j in range(len(grid[i])):
            # Patches were appended row-major, so index back the same way.
            curr_patch = patches[i * len(grid[i]) + j]
            if grid[i][j] == -1:
                # Sentinel: hide the circle rather than color it.
                curr_patch.set_visible(False)
            else:
                curr_patch.set_facecolor(grid[i][j])
                curr_patch.set_visible(True)

    return patches


def plot_world(world, **kwargs):
    """
    Addes a heat-map representing the data in world (an EnvironmentFile
    object) to the current plot.

    kwargs:
       palette - a seaborn palette (list of RGB values) indicating
                 how to color values. Will be converted to a continuous
                 colormap if necessary
       denom - the maximum value of numbers in the grid (only used if the grid
               actually contains numbers). This is used to normalize values
               and use the full dynamic range of the color pallete.
    """
    denom, palette = get_kwargs(world, kwargs, False)
    world = color_grid(world, palette, denom, True)
    plt.tick_params(labelbottom="off", labeltop="off", labelleft="off",
                    labelright="off", bottom="off", top="off",
                    left="off", right="off")
    # plt.tight_layout()
    plt.imshow(world, interpolation="none", zorder=1)
    axes = plt.gca()
    # Keep the axes fixed so overlaid patches don't rescale the view.
    axes.autoscale(False)


# NOTE(review): the next function is truncated at the edge of this chunk;
# the remainder of its docstring and its body lie outside the visible source.
def paired_environment_phenotype_grid(environment, phenotypes, **kwargs):
    """
    Plots the given environment (EnvironmentFile object) and phenotypes
    (2d array of numbers or binary strings) onto the same image and saves
    the image based on the name of the environment file.

    The environment file will be represented by coloring square cells, while
    the phenotypes are circles overlaid on top.

    By default, color is determined using the palettes in the EnvironmentFile
    object passed as the first parameter. The easiest way to change color
    palettes is to assign new palettes to environment.task_palette and
    environment.resource_palette before calling this function.

    If either the environment or phenotypes grids contain integers greater
    than 1, you should pass a `denom` keyword argument indicating how to
    normalize them.
Using differnet denoms for the environment and phenotypes is not currently supported (if you need to, you should probably just divide everything by the appropraite denoms before passing them to this funciton). Inputs: environment - an EnvironmentFile object indicatng
can be seen as outgoing edges on a graph. if self._is_identity: self._inverse = self else: self._inverse = None def _del_derived(self): r""" Delete the derived quantities of ``self``. TESTS:: sage: M = Manifold(2, 'M', structure='topological') sage: X.<x,y> = M.chart() sage: f = M.homeomorphism(M, [x+y, x-y]) sage: f^(-1) Homeomorphism of the 2-dimensional topological manifold M sage: f._inverse # was set by f^(-1) Homeomorphism of the 2-dimensional topological manifold M sage: f._del_derived() sage: f._inverse # has been set to None by _del_derived() """ self._restrictions.clear() self._restrictions_graph = {(self._domain, self._codomain): self} self._extensions_graph = {(self._domain, self._codomain): self} if not self._is_identity: self._inverse = None def display(self, chart1=None, chart2=None): r""" Display the expression of ``self`` in one or more pair of charts. If the expression is not known already, it is computed from some expression in other charts by means of change-of-coordinate formulas. INPUT: - ``chart1`` -- (default: ``None``) chart on the domain of ``self``; if ``None``, the display is performed on all the charts on the domain in which the map is known or computable via some change of coordinates - ``chart2`` -- (default: ``None``) chart on the codomain of ``self``; if ``None``, the display is performed on all the charts on the codomain in which the map is known or computable via some change of coordinates The output is either text-formatted (console mode) or LaTeX-formatted (notebook mode). 
EXAMPLES: Standard embedding of the sphere `S^2` in `\RR^3`:: sage: M = Manifold(2, 'S^2', structure='topological') # the 2-dimensional sphere S^2 sage: U = M.open_subset('U') # complement of the North pole sage: c_xy.<x,y> = U.chart() # stereographic coordinates from the North pole sage: V = M.open_subset('V') # complement of the South pole sage: c_uv.<u,v> = V.chart() # stereographic coordinates from the South pole sage: M.declare_union(U,V) # S^2 is the union of U and V sage: N = Manifold(3, 'R^3', latex_name=r'\RR^3', structure='topological') # R^3 sage: c_cart.<X,Y,Z> = N.chart() # Cartesian coordinates on R^3 sage: Phi = M.continuous_map(N, ....: {(c_xy, c_cart): [2*x/(1+x^2+y^2), 2*y/(1+x^2+y^2), (x^2+y^2-1)/(1+x^2+y^2)], ....: (c_uv, c_cart): [2*u/(1+u^2+v^2), 2*v/(1+u^2+v^2), (1-u^2-v^2)/(1+u^2+v^2)]}, ....: name='Phi', latex_name=r'\Phi') sage: Phi.display(c_xy, c_cart) Phi: S^2 --> R^3 on U: (x, y) |--> (X, Y, Z) = (2*x/(x^2 + y^2 + 1), 2*y/(x^2 + y^2 + 1), (x^2 + y^2 - 1)/(x^2 + y^2 + 1)) sage: Phi.display(c_uv, c_cart) Phi: S^2 --> R^3 on V: (u, v) |--> (X, Y, Z) = (2*u/(u^2 + v^2 + 1), 2*v/(u^2 + v^2 + 1), -(u^2 + v^2 - 1)/(u^2 + v^2 + 1)) The LaTeX output:: sage: latex(Phi.display(c_xy, c_cart)) \begin{array}{llcl} \Phi:& S^2 & \longrightarrow & \RR^3 \\ \mbox{on}\ U : & \left(x, y\right) & \longmapsto & \left(X, Y, Z\right) = \left(\frac{2 \, x}{x^{2} + y^{2} + 1}, \frac{2 \, y}{x^{2} + y^{2} + 1}, \frac{x^{2} + y^{2} - 1}{x^{2} + y^{2} + 1}\right) \end{array} If the argument ``chart2`` is not specified, the display is performed on all the charts on the codomain in which the map is known or computable via some change of coordinates (here only one chart: ``c_cart``):: sage: Phi.display(c_xy) Phi: S^2 --> R^3 on U: (x, y) |--> (X, Y, Z) = (2*x/(x^2 + y^2 + 1), 2*y/(x^2 + y^2 + 1), (x^2 + y^2 - 1)/(x^2 + y^2 + 1)) Similarly, if the argument ``chart1`` is omitted, the display is performed on all the charts on the domain of ``Phi`` in which the map is 
known or computable via some change of coordinates:: sage: Phi.display(chart2=c_cart) Phi: S^2 --> R^3 on U: (x, y) |--> (X, Y, Z) = (2*x/(x^2 + y^2 + 1), 2*y/(x^2 + y^2 + 1), (x^2 + y^2 - 1)/(x^2 + y^2 + 1)) on V: (u, v) |--> (X, Y, Z) = (2*u/(u^2 + v^2 + 1), 2*v/(u^2 + v^2 + 1), -(u^2 + v^2 - 1)/(u^2 + v^2 + 1)) If neither ``chart1`` nor ``chart2`` is specified, the display is performed on all the pair of charts in which ``Phi`` is known or computable via some change of coordinates:: sage: Phi.display() Phi: S^2 --> R^3 on U: (x, y) |--> (X, Y, Z) = (2*x/(x^2 + y^2 + 1), 2*y/(x^2 + y^2 + 1), (x^2 + y^2 - 1)/(x^2 + y^2 + 1)) on V: (u, v) |--> (X, Y, Z) = (2*u/(u^2 + v^2 + 1), 2*v/(u^2 + v^2 + 1), -(u^2 + v^2 - 1)/(u^2 + v^2 + 1)) If a chart covers entirely the map's domain, the mention "on ..." is omitted:: sage: Phi.restrict(U).display() Phi: U --> R^3 (x, y) |--> (X, Y, Z) = (2*x/(x^2 + y^2 + 1), 2*y/(x^2 + y^2 + 1), (x^2 + y^2 - 1)/(x^2 + y^2 + 1)) A shortcut of ``display()`` is ``disp()``:: sage: Phi.disp() Phi: S^2 --> R^3 on U: (x, y) |--> (X, Y, Z) = (2*x/(x^2 + y^2 + 1), 2*y/(x^2 + y^2 + 1), (x^2 + y^2 - 1)/(x^2 + y^2 + 1)) on V: (u, v) |--> (X, Y, Z) = (2*u/(u^2 + v^2 + 1), 2*v/(u^2 + v^2 + 1), -(u^2 + v^2 - 1)/(u^2 + v^2 + 1)) Display when SymPy is the symbolic engine:: sage: M.set_calculus_method('sympy') sage: N.set_calculus_method('sympy') sage: Phi.display(c_xy, c_cart) Phi: S^2 --> R^3 on U: (x, y) |--> (X, Y, Z) = (2*x/(x**2 + y**2 + 1), 2*y/(x**2 + y**2 + 1), (x**2 + y**2 - 1)/(x**2 + y**2 + 1)) sage: latex(Phi.display(c_xy, c_cart)) \begin{array}{llcl} \Phi:& S^2 & \longrightarrow & \RR^3 \\ \mbox{on}\ U : & \left(x, y\right) & \longmapsto & \left(X, Y, Z\right) = \left(\frac{2 x}{x^{2} + y^{2} + 1}, \frac{2 y}{x^{2} + y^{2} + 1}, \frac{x^{2} + y^{2} - 1}{x^{2} + y^{2} + 1}\right) \end{array} """ from sage.misc.latex import latex from sage.tensor.modules.format_utilities import FormattedExpansion def _display_expression(self, chart1, chart2, 
result): r""" Helper function for :meth:`display`. """ from sage.misc.latex import latex try: coord_func = self.coord_functions(chart1, chart2) expression = coord_func.expr() coords1 = chart1[:] if len(coords1) == 1: coords1 = coords1[0] coords2 = chart2[:] if len(coords2) == 1: coords2 = coords2[0] if chart1._domain == self._domain: result._txt += " " result._latex += " & " else: result._txt += "on " + chart1._domain._name + ": " result._latex += r"\mbox{on}\ " + latex(chart1._domain) + \ r": & " result._txt += repr(coords1) + " |--> " result._latex += latex(coords1) + r"& \longmapsto & " if chart2 == chart1: if len(expression) == 1: result._txt += repr(expression[0]) + "\n" result._latex += latex(coord_func[0]) + r"\\" else: result._txt += repr(expression) + "\n" result._latex += latex(coord_func) + r"\\" else: if len(expression) == 1: result._txt += repr(coords2[0]) + " = " + \ repr(expression[0]) + "\n" result._latex += latex(coords2[0]) + " = " + \ latex(coord_func[0]) + r"\\" else: result._txt += repr(coords2) + " = " + \ repr(expression) + "\n" result._latex += latex(coords2) + " = " + \ latex(coord_func) + r"\\" except (TypeError, ValueError): pass result = FormattedExpansion() if self._name is None: symbol = "" else: symbol = self._name + ": " result._txt = symbol + self._domain._name + " --> " + \ self._codomain._name + "\n" if self._latex_name is None: symbol = "" else: symbol = self._latex_name + ":" result._latex = r"\begin{array}{llcl} " + symbol + r"&" + \ latex(self._domain) + r"& \longrightarrow & " + \ latex(self._codomain) + r"\\" if chart1 is None: if chart2 is None: for ch1 in self._domain._top_charts: for ch2 in self._codomain.atlas(): _display_expression(self, ch1, ch2, result) else: for ch1 in self._domain._top_charts: _display_expression(self, ch1, chart2, result) else: if chart2 is None: for ch2 in self._codomain.atlas(): _display_expression(self, chart1, ch2, result) else: _display_expression(self, chart1, chart2, result) result._txt = 
result._txt[:-1] result._latex = result._latex[:-2] + r"\end{array}" return result disp = display def coord_functions(self, chart1=None, chart2=None): r""" Return the functions of the coordinates representing ``self`` in a given pair of charts. If these functions are not already known, they are computed from known ones by means of change-of-chart formulas. INPUT: - ``chart1`` -- (default: ``None``) chart on the domain of ``self``; if ``None``, the domain's
# <reponame>psi-rking/optking  -- repo-scaffold artifact; commented out so the module parses
import logging
from copy import deepcopy
from itertools import combinations, permutations

import numpy as np
import qcelemental as qcel

from . import bend, cart, dimerfrag, oofp
from . import optparams as op
from . import stre, tors, v3d
from .exceptions import AlgError, OptError
from .v3d import are_collinear

# Functions related to freezing, fixing, determining, and
# adding coordinates.


def connectivity_from_distances(geom, Z):
    """Create a boolean matrix describing molecular connectivity from distances.

    Atoms i and j are marked connected when their separation is below
    ``op.Params.covalent_connect`` times the sum of their covalent radii.

    Parameters
    ----------
    geom : ndarray
        (nat, 3) Cartesian geometry.
    Z : list[int]
        (nat) atomic numbers.

    Returns
    -------
    C : ndarray
        (nat, nat) boolean connectivity matrix.
    """
    nat = geom.shape[0]
    C = np.zeros((len(geom), len(geom)), bool)
    for i, j in combinations(range(nat), 2):
        R = v3d.dist(geom[i], geom[j])
        # missing=4.0 assigns unknown elements a huge radius so they still connect
        Rcov = qcel.covalentradii.get(Z[i], missing=4.0) + qcel.covalentradii.get(Z[j], missing=4.0)
        if R < op.Params.covalent_connect * Rcov:
            C[i, j] = C[j, i] = True
    return C


def add_intcos_from_connectivity(C, intcos, geom):
    """Call add_X_from_connectivity for each internal-coordinate type.

    Parameters
    ----------
    C : ndarray
        (nat, nat) connectivity matrix; see connectivity_from_distances().
    intcos : list[simple.Simple]
        current internal coordinates (Stre, Bend, Tors); extended in place.
    geom : ndarray
        (nat, 3) Cartesian geometry.
    """
    add_stre_from_connectivity(C, intcos)
    add_bend_from_connectivity(C, intcos, geom)
    add_tors_from_connectivity(C, intcos, geom)
    if op.Params.include_oofp or check_if_oofp_needed(C, intcos, geom):
        add_oofp_from_connectivity(C, intcos, geom)


def add_stre_from_connectivity(C, intcos):
    """Append a Stre for every connected atom pair not already present.

    Parameters
    ----------
    C : ndarray
        (nat, nat) connectivity matrix.
    intcos : list[simple.Simple]
        extended in place.
    """
    for i, j in combinations(range(len(C)), 2):
        if C[i, j]:
            s = stre.Stre(i, j)
            if s not in intcos:
                intcos.append(s)


def add_h_bonds(geom, zs: list, num_atoms):
    """Add hydrogen bonds to a fragment's coordinate list.

    Parameters
    ----------
    geom : np.ndarray
    zs : list
    num_atoms : int

    Returns
    -------
    list[stre.HBond]

    Notes
    -----
    Look for electronegative atoms. Find hydrogen atoms between the covalent
    radii test and the h_bond_connect limit. Check these hydrogen atoms are
    already bonded to an electronegative atom and the bond angle is >= 90
    degrees.
    """
    logger = logging.getLogger(__name__)
    # N, O, F, P, S, Cl as proposed by Bakken and Helgaker
    electroneg_zs = [7, 8, 9, 15, 16, 17]
    # Atom indices (within the fragment) of electronegative atoms and hydrogens.
    electronegs_present = [index for index, z in enumerate(zs) if z in electroneg_zs]
    hydrogens = [index for index, z in enumerate(zs) if z == 1]

    # Some shortcuts
    min_factor = op.Params.covalent_connect
    limit = op.Params.h_bond_connect
    cov = qcel.covalentradii.get

    h_bonds = []
    for i in electronegs_present:
        for j in hydrogens:
            if j < i:  # do only i < j
                # BUGFIX: was 'break', which abandoned ALL remaining hydrogens
                # for this atom as soon as one with a smaller index appeared.
                continue
            distance = v3d.dist(geom[i], geom[j])
            # BUGFIX: use the atomic number of atom i itself (zs[i]); the
            # original indexed zs with the atom's position in
            # electronegs_present, giving the radius of an unrelated atom.
            covalent_thresh = min_factor * (cov(zs[i], missing=4.0) + cov(1, missing=4.0))
            if limit > distance > covalent_thresh:
                for k in electronegs_present:
                    test_angle = v3d.angle(geom[k], geom[j], geom[i])
                    if test_angle >= (np.pi / 2):
                        h_bonds.append(stre.HBond(i, j))
                        # Add hydrogen bond if 1 appropriate angle in connected atoms
                        break
    return h_bonds


def add_bend_from_connectivity(C, intcos, geom):
    """Add Bends for every connected i-j-k triple.

    Near-linear angles (beyond ``op.Params.linear_bend_threshold``) get a
    LINEAR bend plus its COMPLEMENT instead of a regular bend.

    Parameters
    ----------
    C : ndarray
        (nat, nat) unitary connectivity matrix.
    intcos : list[simple.Simple]
        extended in place.
    geom : ndarray
        (nat, 3) Cartesian geometry.
    """
    nat = len(geom)
    for i, j in permutations(range(nat), 2):
        if C[i, j]:
            for k in range(i + 1, nat):  # make i<k; the constructor checks too
                if C[j, k]:
                    try:
                        # probe: raises AlgError if the angle is ill-defined
                        val = v3d.angle(geom[i], geom[j], geom[k])
                    except AlgError:
                        pass
                    else:
                        if val > op.Params.linear_bend_threshold:
                            b = bend.Bend(i, j, k, bend_type="LINEAR")
                            if b not in intcos:
                                intcos.append(b)
                            b2 = bend.Bend(i, j, k, bend_type="COMPLEMENT")
                            if b2 not in intcos:
                                intcos.append(b2)
                        else:
                            b = bend.Bend(i, j, k)
                            if b not in intcos:
                                intcos.append(b)


def add_tors_from_connectivity(C, intcos, geom):
    """Add torsions for all bonds present, handling collinear segments.

    First adds ordinary i-j-k-l torsions where i-j-k and j-k-l are not
    collinear; then walks outward along collinear chains (detected via the
    presence of LINEAR bends) to find valid torsion end points.

    Parameters
    ----------
    C : ndarray
        (nat, nat) connectivity matrix.
    intcos : list[simple.Simple]
        list of stretches, bends, etc...; extended in place.
    geom : ndarray
        (nat, 3) Cartesian geometry.
    """
    Natom = len(geom)

    # Find i-j-k-l where i-j-k && j-k-l are NOT collinear.
    for i, j in permutations(range(Natom), 2):
        if C[i, j]:
            for k in range(Natom):
                if C[k, j] and k != i:
                    # ensure i-j-k is not collinear; that a regular such bend exists
                    b = bend.Bend(i, j, k)
                    if b not in intcos:
                        continue
                    for l in range(i + 1, Natom):
                        if C[l, k] and l != j:
                            # ensure j-k-l is not collinear
                            b = bend.Bend(j, k, l)
                            if b not in intcos:
                                continue
                            t = tors.Tors(i, j, k, l)
                            if t not in intcos:
                                intcos.append(t)

    # Search for additional torsions around collinear segments.
    # Find collinear fragment j-m-k
    for j, m in permutations(range(Natom), 2):
        if C[j, m]:
            for k in range(j + 1, Natom):
                if C[k, m]:
                    # ignore if regular bend
                    b = bend.Bend(j, m, k)
                    if b in intcos:
                        continue
                    # Found unique, collinear j-m-k
                    # Count atoms bonded to m.
                    nbonds = sum(C[m])
                    if nbonds == 2:  # Nothing else is bonded to m
                        # look for an 'I' for I-J-[m]-k-L such that I-J-K
                        # is not collinear
                        J = j
                        i = 0
                        while i < Natom:
                            if C[i, J] and i != m:  # i!=J i!=m
                                b = bend.Bend(i, J, k, bend_type="LINEAR")
                                if b in intcos:  # i,J,k is collinear
                                    # step outward along the chain and restart
                                    J = i
                                    i = 0
                                    continue
                                else:  # have I-J-[m]-k. Look for L.
                                    I = i
                                    K = k
                                    l = 0
                                    while l < Natom:
                                        if C[l, K] and l != m and l != j and l != i:
                                            b = bend.Bend(l, K, J, bend_type="LINEAR")
                                            if b in intcos:  # J-K-l is collinear
                                                K = l
                                                l = 0
                                                continue
                                            else:  # Have found I-J-K-L.
                                                L = l
                                                try:
                                                    # probe: raises AlgError
                                                    # if torsion ill-defined
                                                    val = v3d.tors(
                                                        geom[I],
                                                        geom[J],
                                                        geom[K],
                                                        geom[L],
                                                    )
                                                except AlgError:
                                                    pass
                                                else:
                                                    t = tors.Tors(I, J, K, L)
                                                    if t not in intcos:
                                                        intcos.append(t)
                                        l += 1
                            i += 1


# For now, let's just check for a single central atom bonded to all others
def check_if_oofp_needed(C, intcos, geom):
    """Return True if out-of-plane coordinates should be added.

    Current heuristic: one central atom is bonded to every other atom.
    """
    logger = logging.getLogger(__name__)
    Natom = len(C)
    maxNneighbors = max(sum(C[i]) for i in range(Natom))
    if maxNneighbors == Natom - 1:
        logger.debug("check_if_oofp_needed() is turning oofp ON")
        return True
    return False


def add_oofp_from_connectivity(C, intcos, geom):
    """Add out-of-plane angles at terminal atoms bonded to tertiary vertices.

    Looks for: (terminal atom)-connected to-(tertiary atom); extends
    ``intcos`` in place.

    Raises
    ------
    OptError
        If a candidate out-of-plane angle cannot be evaluated.
    """
    Nneighbors = [sum(C[i]) for i in range(len(C))]
    terminal_atoms = [i for i in range(len(Nneighbors)) if Nneighbors[i] == 1]
    # Find the (single) atom adjacent to each terminal atom.
    vertex_atoms = [np.where(C[T])[0][0] for T in terminal_atoms]

    for (T, V) in zip(terminal_atoms, vertex_atoms):
        if Nneighbors[V] < 3:
            # FIX: was 'pass' (a no-op); 'continue' states the intent --
            # skip vertices that cannot supply two side atoms.
            continue
        # Find at least 2 other/side atoms
        side = [N for N in np.where(C[V])[0] if N != T]
        if len(side) >= 2:
            try:
                # probe: raises AlgError if the angle is ill-defined
                val = v3d.oofp(geom[T], geom[V], geom[side[0]], geom[side[1]])
            except AlgError:
                raise OptError("Tried to add out-of-plane angle but couldn't evaluate it.")
            else:
                oneOofp = oofp.Oofp(T, V, side[0], side[1])
                if oneOofp not in intcos:
                    intcos.append(oneOofp)
    return


def add_cartesian_intcos(intcos, geom):
    """Add Cartesian coordinates to intcos (takes place of internal coordinates).

    Parameters
    ----------
    intcos : list[simple.Simple]
        extended in place with one Cart per atom per axis.
    geom : ndarray
        (nat, 3) Cartesian geometry.
    """
    Natom = len(geom)
    for i in range(Natom):
        intcos.append(cart.Cart(i, "X"))
        intcos.append(cart.Cart(i, "Y"))
        intcos.append(cart.Cart(i, "Z"))


def linear_bend_check(o_molsys, dq):
    """Search fragments for bends which are quasi-linear but not previously
    identified as "linear bends".

    Parameters
    ----------
    o_molsys : MOLSYS class
    dq : ndarray

    Returns
    -------
    list
        missing linear bends
    """
    logger = logging.getLogger(__name__)
    linear_bends = []
    for frag_index, frag in enumerate(o_molsys.fragments):
        for i, intco in enumerate(frag.intcos):
            if isinstance(intco, bend.Bend):
                new_val = intco.q(frag.geom) + dq[o_molsys.frag_1st_intco(frag_index) + i]
                A, B, C = intco.A, intco.B, intco.C
                # <ABC < 0. A-C-B
                # NOTE(review): source is truncated here; remainder of this
                # function is not visible in this chunk.
there is no Sigmaf, and no U+03A2 character either --> <!ENTITY Sigma "&#931;" ><!-- greek capital letter sigma, U+03A3 ISOgrk3 --> <!ENTITY Tau "&#932;" ><!-- greek capital letter tau, U+03A4 --> <!ENTITY Upsilon "&#933;" ><!-- greek capital letter upsilon, U+03A5 ISOgrk3 --> <!ENTITY Phi "&#934;" ><!-- greek capital letter phi, U+03A6 ISOgrk3 --> <!ENTITY Chi "&#935;" ><!-- greek capital letter chi, U+03A7 --> <!ENTITY Psi "&#936;" ><!-- greek capital letter psi, U+03A8 ISOgrk3 --> <!ENTITY Omega "&#937;" ><!-- greek capital letter omega, U+03A9 ISOgrk3 --> <!ENTITY alpha "&#945;" ><!-- greek small letter alpha, U+03B1 ISOgrk3 --> <!ENTITY beta "&#946;" ><!-- greek small letter beta, U+03B2 ISOgrk3 --> <!ENTITY gamma "&#947;" ><!-- greek small letter gamma, U+03B3 ISOgrk3 --> <!ENTITY delta "&#948;" ><!-- greek small letter delta, U+03B4 ISOgrk3 --> <!ENTITY epsilon "&#949;" ><!-- greek small letter epsilon, U+03B5 ISOgrk3 --> <!ENTITY zeta "&#950;" ><!-- greek small letter zeta, U+03B6 ISOgrk3 --> <!ENTITY eta "&#951;" ><!-- greek small letter eta, U+03B7 ISOgrk3 --> <!ENTITY theta "&#952;" ><!-- greek small letter theta, U+03B8 ISOgrk3 --> <!ENTITY iota "&#953;" ><!-- greek small letter iota, U+03B9 ISOgrk3 --> <!ENTITY kappa "&#954;" ><!-- greek small letter kappa, U+03BA ISOgrk3 --> <!ENTITY lambda "&#955;" ><!-- greek small letter lambda, U+03BB ISOgrk3 --> <!ENTITY mu "&#956;" ><!-- greek small letter mu, U+03BC ISOgrk3 --> <!ENTITY nu "&#957;" ><!-- greek small letter nu, U+03BD ISOgrk3 --> <!ENTITY xi "&#958;" ><!-- greek small letter xi, U+03BE ISOgrk3 --> <!ENTITY omicron "&#959;" ><!-- greek small letter omicron, U+03BF NEW --> <!ENTITY pi "&#960;" ><!-- greek small letter pi, U+03C0 ISOgrk3 --> <!ENTITY rho "&#961;" ><!-- greek small letter rho, U+03C1 ISOgrk3 --> <!ENTITY sigmaf "&#962;" ><!-- greek small letter final sigma, U+03C2 ISOgrk3 --> <!ENTITY sigma "&#963;" ><!-- greek small letter sigma, U+03C3 ISOgrk3 --> <!ENTITY tau "&#964;" ><!-- greek 
small letter tau, U+03C4 ISOgrk3 --> <!ENTITY upsilon "&#965;" ><!-- greek small letter upsilon, U+03C5 ISOgrk3 --> <!ENTITY phi "&#966;" ><!-- greek small letter phi, U+03C6 ISOgrk3 --> <!ENTITY chi "&#967;" ><!-- greek small letter chi, U+03C7 ISOgrk3 --> <!ENTITY psi "&#968;" ><!-- greek small letter psi, U+03C8 ISOgrk3 --> <!ENTITY omega "&#969;" ><!-- greek small letter omega, U+03C9 ISOgrk3 --> <!ENTITY thetasym "&#977;" ><!-- greek small letter theta symbol, U+03D1 NEW --> <!ENTITY upsih "&#978;" ><!-- greek upsilon with hook symbol, U+03D2 NEW --> <!ENTITY piv "&#982;" ><!-- greek pi symbol, U+03D6 ISOgrk3 --> <!-- General Punctuation --> <!ENTITY bull "&#8226;" ><!-- bullet = black small circle, U+2022 ISOpub --> <!-- bullet is NOT the same as bullet operator, U+2219 --> <!ENTITY hellip "&#8230;" ><!-- horizontal ellipsis = three dot leader, U+2026 ISOpub --> <!ENTITY prime "&#8242;" ><!-- prime = minutes = feet, U+2032 ISOtech --> <!ENTITY Prime "&#8243;" ><!-- double prime = seconds = inches, U+2033 ISOtech --> <!ENTITY oline "&#8254;" ><!-- overline = spacing overscore, U+203E NEW --> <!ENTITY frasl "&#8260;" ><!-- fraction slash, U+2044 NEW --> <!-- Letterlike Symbols --> <!ENTITY weierp "&#8472;" ><!-- script capital P = power set = Weierstrass p, U+2118 ISOamso --> <!ENTITY image "&#8465;" ><!-- blackletter capital I = imaginary part, U+2111 ISOamso --> <!ENTITY real "&#8476;" ><!-- blackletter capital R = real part symbol, U+211C ISOamso --> <!ENTITY trade "&#8482;" ><!-- trade mark sign, U+2122 ISOnum --> <!ENTITY alefsym "&#8501;" ><!-- alef symbol = first transfinite cardinal, U+2135 NEW --> <!-- alef symbol is NOT the same as hebrew letter alef, U+05D0 although the same glyph could be used to depict both characters --> <!-- Arrows --> <!ENTITY larr "&#8592;" ><!-- leftwards arrow, U+2190 ISOnum --> <!ENTITY uarr "&#8593;" ><!-- upwards arrow, U+2191 ISOnum--> <!ENTITY rarr "&#8594;" ><!-- rightwards arrow, U+2192 ISOnum --> <!ENTITY darr 
"&#8595;" ><!-- downwards arrow, U+2193 ISOnum --> <!ENTITY harr "&#8596;" ><!-- left right arrow, U+2194 ISOamsa --> <!ENTITY crarr "&#8629;" ><!-- downwards arrow with corner leftwards = carriage return, U+21B5 NEW --> <!ENTITY lArr "&#8656;" ><!-- leftwards double arrow, U+21D0 ISOtech --> <!-- Unicode does not say that lArr is the same as the 'is implied by' arrow but also does not have any other character for that function. So ? lArr can be used for 'is implied by' as ISOtech suggests --> <!ENTITY uArr "&#8657;" ><!-- upwards double arrow, U+21D1 ISOamsa --> <!ENTITY rArr "&#8658;" ><!-- rightwards double arrow, U+21D2 ISOtech --> <!-- Unicode does not say this is the 'implies' character but does not have another character with this function so ? rArr can be used for 'implies' as ISOtech suggests --> <!ENTITY dArr "&#8659;" ><!-- downwards double arrow, U+21D3 ISOamsa --> <!ENTITY hArr "&#8660;" ><!-- left right double arrow, U+21D4 ISOamsa --> <!-- Mathematical Operators --> <!ENTITY forall "&#8704;" ><!-- for all, U+2200 ISOtech --> <!ENTITY part "&#8706;" ><!-- partial differential, U+2202 ISOtech --> <!ENTITY exist "&#8707;" ><!-- there exists, U+2203 ISOtech --> <!ENTITY empty "&#8709;" ><!-- empty set = null set, U+2205 ISOamso --> <!ENTITY nabla "&#8711;" ><!-- nabla = backward difference, U+2207 ISOtech --> <!ENTITY isin "&#8712;" ><!-- element of, U+2208 ISOtech --> <!ENTITY notin "&#8713;" ><!-- not an element of, U+2209 ISOtech --> <!ENTITY ni "&#8715;" ><!-- contains as member, U+220B ISOtech --> <!-- should there be a more memorable name than 'ni'? 
--> <!ENTITY prod "&#8719;" ><!-- n-ary product = product sign, U+220F ISOamsb --> <!-- prod is NOT the same character as U+03A0 'greek capital letter pi' though the same glyph might be used for both --> <!ENTITY sum "&#8721;" ><!-- n-ary sumation, U+2211 ISOamsb --> <!-- sum is NOT the same character as U+03A3 'greek capital letter sigma' though the same glyph might be used for both --> <!ENTITY minus "&#8722;" ><!-- minus sign, U+2212 ISOtech --> <!ENTITY lowast "&#8727;" ><!-- asterisk operator, U+2217 ISOtech --> <!ENTITY radic "&#8730;" ><!-- square root = radical sign, U+221A ISOtech --> <!ENTITY prop "&#8733;" ><!-- proportional to, U+221D ISOtech --> <!ENTITY infin "&#8734;" ><!-- infinity, U+221E ISOtech --> <!ENTITY ang "&#8736;" ><!-- angle, U+2220 ISOamso --> <!ENTITY and "&#8743;" ><!-- logical and = wedge, U+2227 ISOtech --> <!ENTITY or "&#8744;" ><!-- logical or = vee, U+2228 ISOtech --> <!ENTITY cap "&#8745;" ><!-- intersection = cap, U+2229 ISOtech --> <!ENTITY cup "&#8746;" ><!-- union = cup, U+222A ISOtech --> <!ENTITY int "&#8747;" ><!-- integral, U+222B ISOtech --> <!ENTITY there4 "&#8756;" ><!-- therefore, U+2234 ISOtech --> <!ENTITY sim "&#8764;" ><!-- tilde operator = varies with = similar to, U+223C ISOtech --> <!-- tilde operator is NOT the same character as the tilde, U+007E, although the same glyph might be used to represent both --> <!ENTITY cong "&#8773;" ><!-- approximately equal to, U+2245 ISOtech --> <!ENTITY asymp "&#8776;" ><!-- almost equal to = asymptotic to, U+2248 ISOamsr --> <!ENTITY ne "&#8800;" ><!-- not equal to, U+2260 ISOtech --> <!ENTITY equiv "&#8801;" ><!-- identical to, U+2261 ISOtech --> <!ENTITY le "&#8804;" ><!-- less-than or equal to, U+2264 ISOtech --> <!ENTITY ge "&#8805;" ><!-- greater-than or equal to, U+2265 ISOtech --> <!ENTITY sub "&#8834;" ><!-- subset of, U+2282 ISOtech --> <!ENTITY sup "&#8835;" ><!-- superset of, U+2283 ISOtech --> <!-- note that nsup, 'not a superset of, U+2283' is not covered by the 
Symbol font encoding and is not included. Should it be, for symmetry? It is in ISOamsn --> <!ENTITY nsub "&#8836;" ><!-- not a subset of, U+2284 ISOamsn --> <!ENTITY sube "&#8838;" ><!-- subset of or equal to, U+2286 ISOtech --> <!ENTITY supe "&#8839;" ><!-- superset of or equal to, U+2287 ISOtech --> <!ENTITY oplus "&#8853;" ><!-- circled plus = direct sum, U+2295 ISOamsb --> <!ENTITY otimes "&#8855;" ><!-- circled times = vector product, U+2297 ISOamsb --> <!ENTITY perp "&#8869;" ><!-- up tack = orthogonal to = perpendicular, U+22A5 ISOtech --> <!ENTITY sdot "&#8901;" ><!-- dot operator, U+22C5 ISOamsb --> <!-- dot operator is NOT the same character as U+00B7 middle dot --> <!-- Miscellaneous Technical --> <!ENTITY lceil "&#8968;" ><!-- left ceiling = apl upstile, U+2308 ISOamsc --> <!ENTITY rceil "&#8969;" ><!-- right ceiling, U+2309 ISOamsc --> <!ENTITY lfloor "&#8970;" ><!-- left floor = apl downstile, U+230A ISOamsc --> <!ENTITY rfloor "&#8971;" ><!-- right floor, U+230B ISOamsc --> <!ENTITY lang "&#9001;" ><!-- left-pointing angle bracket = bra, U+2329 ISOtech --> <!-- lang is NOT the same character as U+003C 'less than' or U+2039 'single left-pointing angle quotation mark' --> <!ENTITY rang "&#9002;" ><!-- right-pointing angle bracket = ket, U+232A ISOtech --> <!-- rang is NOT the same character as U+003E 'greater than' or U+203A 'single right-pointing angle quotation mark' --> <!-- Geometric Shapes --> <!ENTITY loz "&#9674;" ><!-- lozenge, U+25CA ISOpub
}, ) z: Optional["Sensor.Magnetometer.Z"] = field( default=None, metadata={ "type": "Element", "namespace": "", }, ) @dataclass class X: """ Parameters related to the body-frame X axis of the magnetometer. Parameters ---------- noise: The properties of a sensor noise model. """ noise: Optional["Sensor.Magnetometer.X.Noise"] = field( default=None, metadata={ "type": "Element", "namespace": "", "required": True, }, ) @dataclass class Noise: """ The properties of a sensor noise model. Parameters ---------- mean: For type "gaussian*", the mean of the Gaussian distribution from which noise values are drawn. stddev: For type "gaussian*", the standard deviation of the Gaussian distribution from which noise values are drawn. bias_mean: For type "gaussian*", the mean of the Gaussian distribution from which bias values are drawn. bias_stddev: For type "gaussian*", the standard deviation of the Gaussian distribution from which bias values are drawn. precision: For type "gaussian_quantized", the precision of output signals. A value of zero implies infinite precision / no quantization. type: The type of noise. Currently supported types are: "none" (no noise). "gaussian" (draw noise values independently for each measurement from a Gaussian distribution). "gaussian_quantized" ("gaussian" plus quantization of outputs (ie. 
rounding)) """ mean: float = field( default=0.0, metadata={ "type": "Element", "namespace": "", "required": True, }, ) stddev: float = field( default=0.0, metadata={ "type": "Element", "namespace": "", "required": True, }, ) bias_mean: float = field( default=0.0, metadata={ "type": "Element", "namespace": "", "required": True, }, ) bias_stddev: float = field( default=0.0, metadata={ "type": "Element", "namespace": "", "required": True, }, ) precision: float = field( default=0.0, metadata={ "type": "Element", "namespace": "", "required": True, }, ) type: Optional[str] = field( default=None, metadata={ "type": "Attribute", "required": True, }, ) @dataclass class Y: """ Parameters related to the body-frame Y axis of the magnetometer. Parameters ---------- noise: The properties of a sensor noise model. """ noise: Optional["Sensor.Magnetometer.Y.Noise"] = field( default=None, metadata={ "type": "Element", "namespace": "", "required": True, }, ) @dataclass class Noise: """ The properties of a sensor noise model. Parameters ---------- mean: For type "gaussian*", the mean of the Gaussian distribution from which noise values are drawn. stddev: For type "gaussian*", the standard deviation of the Gaussian distribution from which noise values are drawn. bias_mean: For type "gaussian*", the mean of the Gaussian distribution from which bias values are drawn. bias_stddev: For type "gaussian*", the standard deviation of the Gaussian distribution from which bias values are drawn. precision: For type "gaussian_quantized", the precision of output signals. A value of zero implies infinite precision / no quantization. type: The type of noise. Currently supported types are: "none" (no noise). "gaussian" (draw noise values independently for each measurement from a Gaussian distribution). "gaussian_quantized" ("gaussian" plus quantization of outputs (ie. 
@dataclass
class Z:
    """Parameters related to the body-frame Z axis of the magnetometer.

    Attributes
    ----------
    noise:
        The properties of the sensor noise model applied to this axis.
    """

    noise: Optional["Sensor.Magnetometer.Z.Noise"] = field(
        default=None,
        metadata={"type": "Element", "namespace": "", "required": True},
    )

    @dataclass
    class Noise:
        """A sensor noise model for the magnetometer Z axis.

        Attributes
        ----------
        mean:
            For type "gaussian*", the mean of the Gaussian distribution
            from which noise values are drawn.
        stddev:
            For type "gaussian*", the standard deviation of the Gaussian
            distribution from which noise values are drawn.
        bias_mean:
            For type "gaussian*", the mean of the Gaussian distribution
            from which bias values are drawn.
        bias_stddev:
            For type "gaussian*", the standard deviation of the Gaussian
            distribution from which bias values are drawn.
        precision:
            For type "gaussian_quantized", the precision of output
            signals. Zero implies infinite precision / no quantization.
        type:
            The type of noise: "none", "gaussian", or
            "gaussian_quantized" (gaussian plus output quantization).
        """

        mean: float = field(
            default=0.0,
            metadata={"type": "Element", "namespace": "", "required": True},
        )
        stddev: float = field(
            default=0.0,
            metadata={"type": "Element", "namespace": "", "required": True},
        )
        bias_mean: float = field(
            default=0.0,
            metadata={"type": "Element", "namespace": "", "required": True},
        )
        bias_stddev: float = field(
            default=0.0,
            metadata={"type": "Element", "namespace": "", "required": True},
        )
        precision: float = field(
            default=0.0,
            metadata={"type": "Element", "namespace": "", "required": True},
        )
        type: Optional[str] = field(
            default=None,
            metadata={"type": "Attribute", "required": True},
        )
@dataclass
class Range:
    """Range properties shared by every simulated ray.

    Attributes
    ----------
    min:
        The minimum distance for each ray.
    max:
        The maximum distance for each ray.
    resolution:
        Linear resolution of each ray.
    """

    min: float = field(
        default=0.0,
        metadata={"type": "Element", "namespace": "", "required": True},
    )
    max: float = field(
        default=0.0,
        metadata={"type": "Element", "namespace": "", "required": True},
    )
    resolution: float = field(
        default=0.0,
        metadata={"type": "Element", "namespace": "", "required": True},
    )
Parameters ---------- type: The type of noise. Currently supported types are: "gaussian" (draw noise values independently for each beam from a Gaussian distribution). mean: For type "gaussian," the mean of the Gaussian distribution from which noise values are drawn. stddev: For type "gaussian," the standard deviation of the Gaussian distribution from which noise values are drawn. """ type: str = field( default="gaussian", metadata={ "type": "Element", "namespace": "",
'type': '[str]'}, 'txt_records': {'key': 'properties.txtRecords', 'type': '[str]'}, 'a_records': {'key': 'properties.aRecords', 'type': '[str]'}, 'alternate_cname_records': {'key': 'properties.alternateCNameRecords', 'type': '[str]'}, 'alternate_txt_records': {'key': 'properties.alternateTxtRecords', 'type': '[str]'}, } def __init__(self, **kwargs): super(CustomHostnameAnalysisResult, self).__init__(**kwargs) self.host_name = None self.is_hostname_already_verified = None self.custom_domain_verification_test = None self.custom_domain_verification_failure_info = None self.has_conflict_on_managed_environment = None self.conflicting_container_app_resource_id = None self.c_name_records = kwargs.get('c_name_records', None) self.txt_records = kwargs.get('txt_records', None) self.a_records = kwargs.get('a_records', None) self.alternate_cname_records = kwargs.get('alternate_cname_records', None) self.alternate_txt_records = kwargs.get('alternate_txt_records', None) class CustomOpenIdConnectProvider(Model): """The configuration settings of the custom Open ID Connect provider. :param state: <code>Disabled</code> if the custom Open ID Connect provider should not be enabled despite the set registration; otherwise, <code>Enabled</code>. Possible values include: 'Enabled', 'Disabled' :type state: str or ~commondefinitions.models.IdentityProviderState :param registration: The configuration settings of the app registration for the custom Open ID Connect provider. :type registration: ~commondefinitions.models.OpenIdConnectRegistration :param login: The configuration settings of the login flow of the custom Open ID Connect provider. 
class CustomScaleRule(Model):
    """Container App container Custom scaling rule.

    :param type: Type of the custom scale rule eg: azure-servicebus, redis etc.
    :type type: str
    :param metadata: Metadata properties to describe custom scale rule.
    :type metadata: dict[str, str]
    :param auth: Authentication secrets for the custom scale rule.
    :type auth: list[~commondefinitions.models.ScaleRuleAuth]
    """

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'metadata': {'key': 'metadata', 'type': '{str}'},
        'auth': {'key': 'auth', 'type': '[ScaleRuleAuth]'},
    }

    def __init__(self, **kwargs):
        super(CustomScaleRule, self).__init__(**kwargs)
        # Every property is optional; pull each one from kwargs, defaulting
        # to None when absent.
        for prop in ('type', 'metadata', 'auth'):
            setattr(self, prop, kwargs.get(prop, None))
class DaprComponent(ProxyResource):
    """Dapr Component.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
    :vartype id: str
    :ivar name: The name of the resource
    :vartype name: str
    :ivar type: The type of the resource. E.g.
     "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy
     and modifiedBy information.
    :vartype system_data: ~commondefinitions.models.SystemData
    :param component_type: Component type
    :type component_type: str
    :param version: Component version
    :type version: str
    :param ignore_errors: Boolean describing if the component errors are
     ignores
    :type ignore_errors: bool
    :param init_timeout: Initialization timeout
    :type init_timeout: str
    :param secrets: Collection of secrets used by a Dapr component
    :type secrets: list[~commondefinitions.models.Secret]
    :param metadata: Component metadata
    :type metadata: list[~commondefinitions.models.DaprMetadata]
    :param scopes: Names of container apps that can use this Dapr component
    :type scopes: list[str]
    """

    # Fields populated by the server (ARM resource envelope) are marked
    # read-only and are not settable by the caller.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
    }

    # Python attribute name -> wire-format JSON key path; entries prefixed
    # with "properties." live under the resource's "properties" object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'component_type': {'key': 'properties.componentType', 'type': 'str'},
        'version': {'key': 'properties.version', 'type': 'str'},
        'ignore_errors': {'key': 'properties.ignoreErrors', 'type': 'bool'},
        'init_timeout': {'key': 'properties.initTimeout', 'type': 'str'},
        'secrets': {'key': 'properties.secrets', 'type': '[Secret]'},
        'metadata': {'key': 'properties.metadata', 'type': '[DaprMetadata]'},
        'scopes': {'key': 'properties.scopes', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        # NOTE(review): the read-only envelope attributes (id, name, type,
        # system_data) are presumably initialised by the ProxyResource
        # base class — confirm against its definition.
        super(DaprComponent, self).__init__(**kwargs)
        self.component_type = kwargs.get('component_type', None)
        self.version = kwargs.get('version', None)
        self.ignore_errors = kwargs.get('ignore_errors', None)
        self.init_timeout = kwargs.get('init_timeout', None)
        self.secrets = kwargs.get('secrets', None)
        self.metadata = kwargs.get('metadata', None)
        self.scopes = kwargs.get('scopes', None)
class DaprMetadata(Model):
    """Dapr component metadata.

    :param name: Metadata property name.
    :type name: str
    :param value: Metadata property value.
    :type value: str
    :param secret_ref: Name of the Dapr Component secret from which to pull
     the metadata property value.
    :type secret_ref: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
        'secret_ref': {'key': 'secretRef', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(DaprMetadata, self).__init__(**kwargs)
        # All properties are optional; absent keys default to None.
        for prop in ('name', 'value', 'secret_ref'):
            setattr(self, prop, kwargs.get(prop, None))
class DefaultErrorResponseError(Model):
    """Error model.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar code: Standardized string to programmatically identify the error.
    :vartype code: str
    :ivar message: Detailed error description and debugging information.
    :vartype message: str
    :ivar target: Detailed error description and debugging information.
    :vartype target: str
    :param details: Details or the error
    :type details:
     list[~commondefinitions.models.DefaultErrorResponseErrorDetailsItem]
    :ivar innererror: More information to debug error.
    :vartype innererror: str
    """

    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
        'target': {'readonly': True},
        'innererror': {'readonly': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[DefaultErrorResponseErrorDetailsItem]'},
        'innererror': {'key': 'innererror', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(DefaultErrorResponseError, self).__init__(**kwargs)
        # Server-populated (read-only) attributes start as None; only
        # 'details' may be supplied by the caller.
        for readonly_prop in ('code', 'message', 'target', 'innererror'):
            setattr(self, readonly_prop, None)
        self.details = kwargs.get('details', None)
class EnvironmentVar(Model):
    """Container App container environment variable.

    :param name: Environment variable name.
    :type name: str
    :param value: Non-secret environment variable value.
    :type value: str
    :param secret_ref: Name of the Container App secret from which to pull
     the environment variable value.
    :type secret_ref: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
        'secret_ref': {'key': 'secretRef', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(EnvironmentVar, self).__init__(**kwargs)
        # All properties are optional; absent keys default to None.
        for prop in ('name', 'value', 'secret_ref'):
            setattr(self, prop, kwargs.get(prop, None))
<filename>src/dc_federated/backend/dcf_server.py<gh_stars>1-10 """ Defines the core server class for the federated learning. Abstracts away the lower level server logic from the federated machine learning logic. """ import gevent from gevent import monkey; monkey.patch_all() from gevent import Greenlet, queue, pool import os import json import os.path import zlib import msgpack import hashlib from bottle import Bottle, run, request, response, auth_basic, ServerAdapter from dc_federated.backend._constants import * from dc_federated.backend.backend_utils import * from dc_federated.utils import get_host_ip from dc_federated.backend.backend_utils import is_valid_model_dict from dc_federated.backend._worker_manager import WorkerManager import logging logger = logging.getLogger(__name__) logger.setLevel(level=logging.INFO) class DCFServer(object): """ This class abstracts away the lower level communication logic for the central server/node from the actual federated learning logic. It interacts with the central server node via the 4 callback functions passed in the constructor. For an example usage please refer to the package dc_federated.example_dcf+model. Parameters ---------- register_worker_callback: This function is expected to take the id of a newly registered worker and should contain the application specific logic for dealing with a new worker joining the federated learning pool. unregister_worker_callback: This function is expected to take the id of a newly unregistered worker and should contain the application specific logic for dealing with a worker leaving the federated learning pool. return_global_model_callback: () -> dict This function is expected to return a dictionary with the GLOBAL_MODEL: containing the serialization of the global model GLOBAL_MODEL_VERSION: the global model version (algorithm specific). is_global_model_most_recent: str -> bool Returns the True if the model version given in the string is the most recent one - otherwise returns False. 
receive_worker_update_callback: dict -> bool This function should receive a worker-id and an application dependent binary serialized update from the worker. The server code ensures that the worker-id was previously registered. server_mode_safe: bool Whether or not the server should be in safe of unsafe mode. Safe mode does not allow unauthenticated workers with the optional initial set of public keys passed via the key_list_parameters. Raises an exception if server started in unsafe mode and key_list_file is not None. key_list_file: str The name of the file containing the public keys for valid workers. The public keys are given one key per line, with each key being generated by the worker_key_pair_tool.py tool. If None, then no authentication is performed. load_last_session_workers: bool (default True) When running in safe mode, whether or not to load the workers from the previous session. path_to_keys_db: str Path to the database of workers' public keys that has been added. server_host_ip: str (default None) The ip-address of the host of the server. If None, then it uses the ip-address of the current machine. server_port: int (default 8080) The port at which the serer should listen to. If None, then it uses the port 8080. ssl_enabled: bool (default False) Enable SSL/TLS for server/workers communications. ssl_keyfile: str Must be a valid path to the key file. This is mandatory if ssl_enabled, ignored otherwise. ssl_certfile: str Must be a valid path to the certificate. This is mandatory if ssl_enabled, ignored otherwise. model_check_interval: int The interval of time between the server checking for an updated model for the long polling. 
""" def __init__( self, register_worker_callback, unregister_worker_callback, return_global_model_callback, is_global_model_most_recent, receive_worker_update_callback, server_mode_safe, key_list_file, load_last_session_workers=True, path_to_keys_db='.keys_db.json', server_host_ip=None, server_port=8080, ssl_enabled=False, ssl_keyfile=None, ssl_certfile=None, model_check_interval=10, debug=False ): self.server_host_ip = get_host_ip() if server_host_ip is None else server_host_ip self.server_port = server_port self.register_worker_callback = register_worker_callback self.unregister_worker_callback = unregister_worker_callback self.return_global_model_callback = return_global_model_callback self.is_global_model_most_recent = is_global_model_most_recent self.receive_worker_update_callback = receive_worker_update_callback self.worker_manager = WorkerManager(server_mode_safe, key_list_file, load_last_session_workers, path_to_keys_db) self.gevent_pool = pool.Pool(None) self.model_version_req_dict = {} self.model_check_interval = model_check_interval self.debug = debug self.ssl_enabled = ssl_enabled if ssl_enabled: if ssl_certfile is None or ssl_keyfile is None: raise RuntimeError( "When ssl is enabled, both a certfile and keyfile must be provided") if not os.path.isfile(ssl_certfile): raise IOError( "The provided SSL certificate file doesn't exist") if not os.path.isfile(ssl_keyfile): raise IOError("The provided SSL key file doesn't exist") self.ssl_keyfile = ssl_keyfile self.ssl_certfile = ssl_certfile @staticmethod def is_admin(username, password): """ Callback for bottle to check that the requester is authorized to act as an admin for the server. Parameters ---------- username: str The admin username. password: str The admin password. Returns ------- bool: True if the user/password us valid, false otherwise. 
""" adm_username = os.environ.get(ADMIN_USERNAME) adm_password = os.environ.get(ADMIN_PASSWORD) if adm_username is None or adm_password is None: return False return username == adm_username and password == adm_password @staticmethod def validate_input(dct, keys, data_types): """ Validates the given input dictionary dct by ensuring that all the keys are in dct and they have the corresponding types. Parameters ---------- dct: object The object to verify as a dictionary keys: str list Lisf of keys to test for. data_types: The types of the elements in the keys. Returns ------- dict: The keys for which the checks failed - otherwise """ inp_invalid = verify_dict(dct, keys, data_types) if len(inp_invalid) > 0: error_str = "Invalid input: failed to get the following keys from JSON input: " \ f"{inp_invalid}" return { ERROR_MESSAGE_KEY: error_str } else: return {} def add_and_register_worker(self): """ Registers the worker, adding it to the list of allowed workers if necessary. Returns ------- str: The id of the new client, or INVALID_WORKER if the process failed. """ worker_data = request.json valid_failed = DCFServer.validate_input(worker_data, [PUBLIC_KEY_STR], [str]) if ERROR_MESSAGE_KEY in valid_failed: logger.error(valid_failed[ERROR_MESSAGE_KEY]) return valid_failed[ERROR_MESSAGE_KEY] signed_phrase = "" if SIGNED_PHRASE not in worker_data else worker_data[SIGNED_PHRASE] worker_id, success = \ self.worker_manager.authenticate_and_add_worker(worker_data[PUBLIC_KEY_STR], signed_phrase) if worker_id == INVALID_WORKER: return worker_id if not self.worker_manager.is_worker_registered(worker_id): self.worker_manager.set_registration_status(worker_id, True) self.register_worker_callback(worker_id) return worker_id def admin_list_workers(self): """ List all registered workers Returns ------- str: JSON in string form containing id of workers and their registration status. 
""" return json.dumps(self.worker_manager.get_worker_list()) def admin_add_worker(self): """ Add a new worker to the list or allowed workers via the admin API. JSON Body: public_key_str: string The public key associated with the worker Returns ------- str: JSON in string form either containing the id of the worker added + its registration status or an error message if that failed. """ worker_data = request.json valid_failed = DCFServer.validate_input(worker_data, [PUBLIC_KEY_STR, REGISTRATION_STATUS_KEY], [str, bool]) if ERROR_MESSAGE_KEY in valid_failed: logger.error(valid_failed[ERROR_MESSAGE_KEY]) return json.dumps(valid_failed) logger.info("Admin is adding a new worker...") worker_id, success = self.worker_manager.add_worker(worker_data[PUBLIC_KEY_STR]) if worker_id == INVALID_WORKER: err_msg = f"Unable to validate public key (short) {worker_data[PUBLIC_KEY_STR][0:WID_LEN]} "\ "- worker not added." logger.warning(err_msg) return json.dumps({ ERROR_MESSAGE_KEY: err_msg }) if not success: return json.dumps({ERROR_MESSAGE_KEY: f"Worker {worker_id[0:WID_LEN]} already exists."}) worker_id = self.worker_manager.set_registration_status( worker_id, worker_data[REGISTRATION_STATUS_KEY]) if worker_id == INVALID_WORKER: error_str = message_seriously_wrong("worker was just added but now being reported as not added") logger.error(error_str) return json.dumps({ERROR_MESSAGE_KEY: error_str}) if worker_data[REGISTRATION_STATUS_KEY]: self.register_worker_callback(worker_id) return json.dumps({ SUCCESS_MESSAGE_KEY: f"Successfully added worker {worker_id[0:WID_LEN]}.", WORKER_ID_KEY: worker_id, REGISTRATION_STATUS_KEY: worker_data[REGISTRATION_STATUS_KEY] }) def admin_delete_worker(self, worker_id): """ Delete a new worker from the list of allowed workers via the admin API. Parameters ---------- worker_id: str The id of the worker to delete Returns ------- str: JSON in string form containing either id of worker removed or error message if the operation failed for some reason. 
""" logger.info(f"Admin is removing worker {worker_id[0:WID_LEN]}...") was_registered = self.worker_manager.is_worker_registered(worker_id) worker_id = self.worker_manager.set_registration_status(worker_id, False) if worker_id != INVALID_WORKER: if was_registered: self.unregister_worker_callback(worker_id) logger.info(f"Worker {worker_id[0:WID_LEN]} was unregistered (removal)") worker_id = self.worker_manager.remove_worker(worker_id) if worker_id == INVALID_WORKER: return json.dumps({ERROR_MESSAGE_KEY: f"Attempt to remove unknown worker {worker_id[0:WID_LEN]}."}) return json.dumps({ WORKER_ID_KEY: worker_id, SUCCESS_MESSAGE_KEY: f"Successfully removed worker {worker_id[0:WID_LEN]}." }) def admin_set_worker_status(self, worker_id): """ Set worker status to (REGISTRATION_STATUS_KEY = True or False) via the admin API. Parameters ---------- worker_id: str The id of the worker to set the status for. Returns ------- str: JSON in string form containing either id of worker removed and registration status or error message if the operation failed for some reason. """ worker_data = request.json logger.info(f"Admin is setting the status of {worker_id[0:WID_LEN]}...") valid_failed = DCFServer.validate_input(worker_data, [REGISTRATION_STATUS_KEY], [bool]) if ERROR_MESSAGE_KEY in valid_failed: logger.error(valid_failed[ERROR_MESSAGE_KEY]) return json.dumps(valid_failed) was_registered = self.worker_manager.is_worker_registered(worker_id) worker_id = self.worker_manager.set_registration_status( worker_id, worker_data[REGISTRATION_STATUS_KEY]) if worker_id == INVALID_WORKER: return json.dumps({ ERROR_MESSAGE_KEY: f"Attempt at changing worker status failed - " f"please ensure this worker was added: {worker_id[0:WID_LEN]}." 
}) logger.info(f"New {worker_id[0:WID_LEN]} status is {REGISTRATION_STATUS_KEY}: " f"{worker_data[REGISTRATION_STATUS_KEY]}") if not was_registered and worker_data[REGISTRATION_STATUS_KEY]: self.register_worker_callback(worker_id) if was_registered and not worker_data[REGISTRATION_STATUS_KEY]: self.unregister_worker_callback(worker_id) return json.dumps({ SUCCESS_MESSAGE_KEY: f"Successfully changed status for worker {worker_id[0:WID_LEN]}.", WORKER_ID_KEY: worker_id, REGISTRATION_STATUS_KEY:
hint is *NOT* a metahint. # ..............{ FORWARDREF }.............. # If this hint is a forward reference... elif hint_curr_sign is HintSignForwardRef: # Possibly unqualified classname referred to by this hint. hint_curr_forwardref_classname = get_hint_forwardref_classname( hint_curr) # If this classname contains one or more "." characters, this # classname is fully-qualified. In this case... if '.' in hint_curr_forwardref_classname: # Pass the beartypistry singleton as a private # "__beartypistry" parameter to this wrapper function. func_wrapper_locals[ARG_NAME_TYPISTRY] = bear_typistry # Python expression evaluating to this class when accessed # via the private "__beartypistry" parameter. hint_curr_expr = register_typistry_forwardref( hint_curr_forwardref_classname) # Else, this classname is unqualified. In this case... else: # If the set of unqualified classnames referred to by all # relative forward references has yet to be instantiated, # do so. if hints_forwardref_class_basename is None: hints_forwardref_class_basename = set() # In any case, this set now exists. # Add this unqualified classname to this set. hints_forwardref_class_basename.add( hint_curr_forwardref_classname) # Placeholder substring to be replaced by the caller with a # Python expression evaluating to this unqualified # classname canonicalized relative to the module declaring # the currently decorated callable when accessed via the # private "__beartypistry" parameter. hint_curr_expr = ( f'{PEP_CODE_HINT_FORWARDREF_UNQUALIFIED_PLACEHOLDER_PREFIX}' f'{hint_curr_forwardref_classname}' f'{PEP_CODE_HINT_FORWARDREF_UNQUALIFIED_PLACEHOLDER_SUFFIX}' ) # Code type-checking the current pith against this class. func_curr_code = _PEP_CODE_CHECK_HINT_NONPEP_TYPE_format( pith_curr_expr=pith_curr_expr, hint_curr_expr=hint_curr_expr, ) # Else, this hint is *NOT* a forward reference. # ..............{ GENERIC or PROTOCOL }.............. 
# If this hint is either a: # * PEP 484-compliant generic (i.e., user-defined class subclassing # a combination of one or more of the "typing.Generic" superclass # and other "typing" non-class pseudo-superclasses). # * PEP 544-compliant protocol (i.e., class subclassing a # combination of one or more of the "typing.Protocol" superclass # and other "typing" non-class pseudo-superclasses). # * PEP 585-compliant generic (i.e., user-defined class subclassing # at least one non-class PEP 585-compliant pseudo-superclasses). # Then this hint is a PEP-compliant generic. In this case... elif hint_curr_sign is HintSignGeneric: #FIXME: *THIS IS NON-IDEAL.* Ideally, we should propagate *ALL* #child type hints subscripting a generic up to *ALL* #pseudo-superclasses of that generic (e.g., the "int" child #hint subscripting a parent hint "MuhGeneric[int]" of type #"class MuhGeneric(list[T]): pass" up to its "list[T]" #pseudo-superclass). # #For now, we just strip *ALL* child type hints subscripting a #generic with the following call. This suffices, because we #just need this to work. So it goes, uneasy code bedfellows. # If this hint is *NOT* a class, this hint is *NOT* an # unsubscripted generic but could still be a generic # subscripted by one or more PEP-compliant child type hints. # # To decide, reduce this hint to the object originating this # hint if any, enabling the subsequent assertion to assert # whether this origin object is an unsubscripted generic, which # would then imply this hint to be a subscripted generic. If # this strikes you as insane, you're not alone. hint_curr = get_hint_pep_generic_type_or_none(hint_curr) # Assert this hint to be a class. assert isinstance(hint_curr, type), ( f'{hint_curr_label} {repr(hint_curr)} generic not class.') # Tuple of the one or more unerased pseudo-superclasses # originally listed as superclasses prior to their type erasure # subclassed by this generic. 
hint_childs = get_hint_pep_generic_bases_unerased(hint_curr) # Initialize the code type-checking the current pith against # this generic to the substring prefixing all such code. func_curr_code = _PEP_CODE_CHECK_HINT_GENERIC_PREFIX # For each pseudo-superclass subclassed by this generic... for hint_child in hint_childs: # print(f'hint_child: {repr(hint_child)} {is_hint_pep_type_typing(hint_child)}') # If this pseudo-superclass is an actual class, this class # is effectively ignorable. Why? Because the # "_PEP_CODE_CHECK_HINT_GENERIC_PREFIX" snippet leveraged # above already type-checks this pith against the generic # subclassing this superclass and thus this superclass as # well with a trivial isinstance() call. In this case, skip # to the next pseudo-superclass. if isinstance(hint_child, type): continue # Else, this pseudo-superclass is *NOT* an actual class. # # If this pseudo-superclass is neither a PEP 585-compliant # type hint *NOR* a PEP-compliant type hint defined by the # "typing" module, this pseudo-superclass *MUST* be a PEP # 585-noncompliant user-defined pseudo-superclass. In this # case, reduce this pseudo-superclass to the corresponding # actual superclass originating this pseudo-superclass. 
# # Note that: # * This horrible, irrational, and unintuitive edge case # arises *ONLY* for user-defined PEP 484-compliant # generics and PEP 544-compliant protocols subclassing # another user-defined generic or protocol superclass # subscripted by one or more type variables: e.g., # >>> import typing as t # >>> class UserProtocol(t.Protocol[t.AnyStr]): pass # >>> class UserSubprotocol(UserProtocol[str], t.Protocol): pass # >>> UserSubprotocol.__orig_bases__ # (UserProtocol[bytes], typing.Protocol) # >>> UserProtocolUnerased = UserSubprotocol.__orig_bases__[0] # >>> UserProtocolUnerased is UserProtocol # False # >>> isinstance(UserProtocolUnerased, type) # False # * PEP 585-compliant generics suffer no such issues: # >>> from beartype._util.hint.pep.proposal.utilhintpep585 import is_hint_pep585_builtin # >>> class UserGeneric(list[int]): pass # >>> class UserSubgeneric(UserGeneric[int]): pass # >>> UserSubgeneric.__orig_bases__ # (UserGeneric[int],) # >>> UserGenericUnerased = UserSubgeneric.__orig_bases__[0] # >>> isinstance(UserGenericUnerased, type) # True # >>> UserGenericUnerased.__mro__ # (UserGeneric, list, object) # >>> is_hint_pep585_builtin(UserGenericUnerased) # True # # Walking up the unerased inheritance hierarchy for this # generic or protocol iteratively visits the user-defined # generic or protocol pseudo-superclass subscripted by one # or more type variable. Due to poorly defined obscurities # in the "typing" implementation, this pseudo-superclass is # *NOT* actually a class but rather an instance of a # private "typing" class (e.g., "typing._SpecialForm"). # # Ergo, this pseudo-superclass will be subsequently # detected as neither a generic nor "typing" object and # thus raise exceptions. Our only recourse is to silently # reduce this hint into the erased superclass to which the # "typing" module previously transformed this hint (e.g., # "UserProtocol" above). 
This is slightly non-ideal, as # this erased superclass is an actual class that should # ideally be ignored rather than redundantly tested against # the current pith again. Nonetheless, there exists no # other means of recursing into the possibly relevant # superclasses of this erased superclass. # # Note that, in theory, we could deeply refactor this # algorithm to support the notion of child hints that # should be ignored for purposes of type-checking but # nonetheless recursed into. In practice, the current # approach only introduces mild runtime inefficiencies # while preserving sanity throughout this algorithm. # # Specifically, perform this awful reduction *ONLY* if # this child hint is a PEP 484- or 544-compliant # user-defined pseudo-superclass that is neither... elif not ( # A PEP 585-compliant pseudo-superclass *NOR*... is_hint_pep585_builtin(hint_child) and # A PEP 484- or 544-compliant pseudo-superclass defined # by the "typing" module. is_hint_pep_typing(hint_child) ): hint_child = ( get_hint_pep484_generic_base_erased_from_unerased( hint_child)) # Else, this pseudo-superclass is defined by the "typing" # module. # If this superclass is ignorable, do so. if is_hint_ignorable(hint_child): continue # Else, this superclass is unignorable. # Generate and append code type-checking this pith against # this superclass. func_curr_code += ( _PEP_CODE_CHECK_HINT_GENERIC_CHILD_format( hint_child_placeholder=_enqueue_hint_child( # Python expression efficiently reusing the # value of this pith previously assigned to a # local variable by the prior prefix. pith_curr_assigned_expr), )) # Munge this code to... func_curr_code = ( # Strip the erroneous " and" suffix appended by the last # child hint from this code. f'{func_curr_code[:_LINE_RSTRIP_INDEX_AND]}' # Suffix this code by the substring suffixing all such # code. f'{_PEP_CODE_CHECK_HINT_GENERIC_SUFFIX}' # Format... ).format( # Indentation deferred above for efficiency. 
indent_curr=indent_curr, pith_curr_assign_expr=pith_curr_assign_expr, # Python expression evaluating to this generic type. hint_curr_expr=add_func_scope_type( cls=hint_curr, cls_scope=func_wrapper_locals, cls_label=_FUNC_WRAPPER_LOCAL_LABEL, ), ) # print(f'{hint_curr_label} PEP generic {repr(hint)} handled.') # Else, this hint is *NOT* a generic. # ..............{ LITERAL }.............. # If this hint is a PEP 586-compliant type hint (i.e., the # "typing.Literal" singleton subscripted by one or more literal # objects), this hint is largely useless and thus intentionally # detected last. Why? Because "typing.Literal" is subscriptable by # objects that are instances of
"""Utilities for interacting with GitHub""" import os import json import webbrowser import stat import sys from git import Repo from .context import Context event_dict = { "added_to_project": ( lambda event: "{} added the issue to a project.".format(event["actor"]["login"]) ), "assigned": ( lambda event: "{} assigned the issue to {}.".format( event["actor"]["login"], event["assignee"]["login"] ) ), "closed": (lambda event: "{} closed this issue.".format(event["actor"]["login"])), "converted_note_to_issue": ( lambda event: "{} created this issue from a note.".format( event["actor"]["login"] ) ), "demilestoned": (lambda event: "The issue was removed from a milestone."), "head_ref_deleted": (lambda event: "The pull request's branch was deleted."), "head_ref_restored": (lambda event: "The pull request's branch was restored."), "labelled": ( lambda event: "{} added {} label to the issue.".format( event["actor"]["login"], event["label"] ) ), "locked": ( lambda event: "The issue was locked by {}.".format(event["actor"]["login"]) ), "mentioned": ( lambda event: "{} was mentioned in the issue's body.".format( event["actor"]["login"] ) ), "marked_as_duplicate": ( lambda event: "The issue was marked duplicate by {}.".format( event["actor"]["login"] ) ), "merged": ( lambda event: "The issue was merged by {}.".format(event["actor"]["login"]) ), "milestoned": (lambda event: "The issue was added to a milestone."), "moved_columns_in_project": ( lambda event: "The issue was moved between columns in a project board." 
), "referenced": (lambda event: "The issue was referenced from a commit message."), "renamed": (lambda event: "The title of the issue was changed."), "reopened": ( lambda event: "The issue was reopened by {}".format(event["actor"]["login"]) ), "review_dismissed": ( lambda event: "{} dismissed a review from the pull request.".format( event["actor"]["login"] ) ), "review_requested": ( lambda event: "{} requested review from the subject on this pull request.".format( event["actor"]["login"] ) ), "review_request_removed": ( lambda event: "{} removed the review request for the subject on this pull request.".format( event["actor"]["login"] ) ), "subscribed": ( lambda event: "{} subscribed to receive notifications for the issue.".format( event["actor"]["login"] ) ), "transferred": (lambda event: "The issue was transferred to another repository."), "unassigned": ( lambda event: "{} was unassigned from the issue.".format( event["actor"]["login"] ) ), "unlabeled": (lambda event: "A label was removed from the issue."), "unlocked": ( lambda event: "The issue was unlocked by {}".format(event["actor"]["login"]) ), "unmarked_as_duplicate": (lambda event: "The was unmarked as dublicate."), "user_blocked": (lambda event: "A user was blocked from the organization."), } def authorize(ghub, reauthorize=False, fromenv=False): """Authorize a user for GHub Keyword arguments: ghub -- the ghub object that needs authorization reauthorize -- performs authorization again (default False) """ if fromenv: oauth_data = json.loads(os.environ["GHUB_CRED"]) ghub.oauth_data = oauth_data ghub.github.token = oauth_data return True if not os.path.isfile(ghub.data_path / ghub.auth_filename) or reauthorize: authorization_base_url = "https://github.com/login/oauth/authorize" token_url = "https://github.com/login/oauth/access_token" authorization_url, _ = ghub.github.authorization_url(authorization_base_url) webbrowser.open(authorization_url) print("Please visit this site and grant access: 
{}".format(authorization_url)) redirect_response = input( "Please enter the URL you were redirected to after granting access: " ) try: response = ghub.github.fetch_token( token_url, client_secret=ghub.client_secret, authorization_response=redirect_response, ) except Exception as e: print(e) print( "Network Error. Make sure you have a working internet connection and try again." ) sys.exit(1) if not os.path.isdir(ghub.data_path): os.makedirs(ghub.data_path) data_file = open(ghub.data_path / ghub.auth_filename, "w+") json.dump(response, data_file) data_file.close() os.chmod(ghub.data_path / ghub.auth_filename, stat.S_IRUSR | stat.S_IWUSR) ghub.oauth_data = response return True else: data_file = open(ghub.data_path / ghub.auth_filename, "r") oauth_data = json.loads(data_file.read()) data_file.close() ghub.oauth_data = oauth_data ghub.github.token = oauth_data return True def get_user(ghub, user): url = ghub.api_url + ghub.endpoints["users"] + user response = ghub.github.get(url) if response.status_code == 200: ghub.context = Context(prev_context=ghub.context) ghub.context.context = "user" ghub.context.location = user ghub.context.cache = response.json() return True return False def get_org(ghub, org): url = ghub.api_url + ghub.endpoints["orgs"] + org response = ghub.github.get(url) if response.status_code == 200: ghub.context = Context(prev_context=ghub.context) ghub.context.context = "org" ghub.context.location = org ghub.context.cache = response.json() return True return False def get_user_tabs(ghub, tab=""): tabs = ["repos", "stars", "followers", "following", "notifications"] if tab not in tabs: print("{} is not a valid user tab".format(tab)) return if ghub.context.context == "root": if tab == "": ghub.context.set_context_to_root() elif tab == "repos": response = ghub.github.get(ghub.api_url + ghub.endpoints["user"] + "/repos") if response.status_code == 200: ghub.context = Context(prev_context=ghub.context) ghub.context.cache = response.json() ghub.context.location 
= ghub.user["login"] + "/" + "repos" ghub.context.context = "repos" else: print("Error getting data - " + response.status_code) elif tab == "stars": response = ghub.github.get( ghub.api_url + ghub.endpoints["user"] + "/starred" ) if response.status_code == 200: ghub.context = Context(prev_context=ghub.context) ghub.context.cache = response.json() ghub.context.location = ghub.user["login"] + "/" + "stars" ghub.context.context = "stars" else: print("Error getting data - " + response.status_code) elif tab == "followers" or tab == "following": response = ghub.github.get( ghub.api_url + ghub.endpoints["user"] + "/" + tab ) if response.status_code == 200: ghub.context = Context(prev_context=ghub.context) ghub.context.cache = response.json() ghub.context.location = ghub.user["login"] + "/" + tab ghub.context.context = tab else: print("Error getting data - " + response.status_code) elif tab == "notifications": response = ghub.github.get(ghub.api_url + ghub.endpoints["notifications"]) if response.status_code == 200: ghub.context = Context(prev_context=ghub.context) ghub.context.cache = response.json() ghub.context.location = ghub.user["login"] + "/" + tab ghub.context.context = tab else: print("Error getting data - " + response.status_code) elif ghub.context.context == "user" or ghub.context.context == "org": if tab == "": ghub.context.set_context_to_root() elif tab == "repos": if ghub.context.context == "user": url = ( ghub.api_url + ghub.endpoints["users"] + ghub.context.location + "/repos" ) else: url = ( ghub.api_url + ghub.endpoints["orgs"] + ghub.context.location + "/repos" ) response = ghub.github.get(url) if response.status_code == 200: ghub.context = Context(prev_context=ghub.context) ghub.context.cache = response.json() ghub.context.location = ( ghub.context.prev_context.location + "/" + "repos" ) ghub.context.context = "repos" else: print("Error getting data - " + response.status_code) elif tab == "stars": response = ghub.github.get( ghub.api_url + 
ghub.endpoints["users"] + ghub.context.location + "/starred" ) if response.status_code == 200: ghub.context = Context(prev_context=ghub.context) ghub.context.cache = response.json() ghub.context.location = ( ghub.context.prev_context.location + "/" + "star" ) ghub.context.context = "stars" else: print("Error getting data - " + response.status_code) elif tab == "followers" or tab == "following": response = ghub.github.get( ghub.api_url + ghub.endpoints["users"] + ghub.context.location + "/" + tab ) if response.status_code == 200: ghub.context = Context(prev_context=ghub.context) ghub.context.cache = response.json() ghub.context.location = ghub.context.prev_context.location + "/" + tab ghub.context.context = tab else: print("Error getting data - " + response.status_code) else: pass def get_latest_commit(ghub, repo, branch="master"): api_url = "https://api.github.com/repos/{}/branches/{}".format(repo, branch) response = ghub.github.get(api_url) if response.status_code == 200: response = response.json() return response["commit"]["commit"] else: return False def get_tree(ghub, repo=None, branch="master", tree_url=None): if tree_url == None: latest_commit = get_latest_commit(ghub, repo, branch) if latest_commit == False: return False response = ghub.github.get(latest_commit["tree"]["url"]) if response.status_code == 200: response = response.json() return response return False else: response = ghub.github.get(tree_url) if response.status_code == 200: response = response.json() return response def get_blob(ghub, blob_url): response = ghub.github.get(blob_url) if response.status_code == 200: return response.json() return False def clone_repo(ghub, dir, repo_name=None): print("Preparing to clone...") if repo_name == None: repo_name = "/".join(ghub.context.location.split("/")[:2]) if dir[0] == "~": dir = os.path.expanduser("~") + dir[1:] dir = dir + "/" + repo_name.split("/")[1] try: Repo.clone_from("https://github.com/" + repo_name, dir) print("{} cloned to 
{}".format(repo_name, dir)) return True except Exception as e: print(e) return False def star_repo(ghub, repo_name=None): print("Starring repo...") if repo_name == None: repo_name = ghub.context.location star_url = ghub.api_url + ghub.endpoints["user"] + "/" + "starred/" + repo_name response = ghub.github.get(star_url) if response.status_code == 204: print("Repo is already starred.") elif response.status_code == 404: resp = ghub.github.put(star_url) if resp.status_code == 204: print("{} starred".format(repo_name)) else: print("Error starring repo") def unstar_repo(ghub, repo_name=None): print("Unstarring repo...") if repo_name == None: repo_name = ghub.context.location star_url = ghub.api_url + ghub.endpoints["user"] + "/" + "starred/" + repo_name response = ghub.github.get(star_url) if response.status_code == 204: resp = ghub.github.delete(star_url) if resp.status_code == 204: print("{} unstarred".format(repo_name)) else: print("Error unstarring repo") elif response.status_code == 404: print("Repo is not starred.") def watch_repo(ghub, repo_name=None): print("Subscribing to repo...") if repo_name == None: repo_name = ghub.context.location watch_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/subscription" response = ghub.github.get(watch_url) if response.status_code == 200: print("You are already watching this repo.") elif response.status_code == 404: resp = ghub.github.put(watch_url) if resp.status_code == 200: print("Watching {}".format(repo_name)) else: print("Error subscribing to repo") def unwatch_repo(ghub, repo_name=None): print("Unsubscribing repo...") if repo_name == None: repo_name = ghub.context.location watch_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/subscription" response = ghub.github.get(watch_url) if response.status_code == 200: resp = ghub.github.delete(watch_url) if resp.status_code == 204: print("{} unsubscribed".format(repo_name)) else: print("Error unsubscribing to repo") elif response.status_code == 404: 
print("You are not watching this repo.") def fork_repo(ghub, repo_name=None): print("Forking Repo...") if repo_name == None: repo_name = ghub.context.location.split("/") repo_name = "/".join(repo_name[:2]) true_repo_name = repo_name.split("/")[1] forked_url = (
#
# Autogenerated by Thrift Compiler (0.9.3)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
#  options string: py
#

# NOTE(review): Thrift-compiler-generated RPC stubs for a service exposing
# train / getWeight / getBiase / save.  This is Python-2-era output (uses
# `xrange` and `dict.iteritems`) — to change behavior or port to Python 3,
# regenerate from the .thrift IDL with a newer compiler instead of editing.

from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import logging
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
  from thrift.protocol import fastbinary
except:
  fastbinary = None


# Abstract service interface implemented by server handlers.
class Iface:
  def train(self, data):
    """
    Parameters:
     - data
    """
    pass

  def getWeight(self):
    pass

  def getBiase(self):
    pass

  def save(self):
    pass


# Client-side proxy: each call serializes args, flushes the transport and
# blocks reading the matching result message.
class Client(Iface):
  def __init__(self, iprot, oprot=None):
    self._iprot = self._oprot = iprot
    if oprot is not None:
      self._oprot = oprot
    self._seqid = 0

  def train(self, data):
    """
    Parameters:
     - data
    """
    self.send_train(data)
    self.recv_train()

  def send_train(self, data):
    self._oprot.writeMessageBegin('train', TMessageType.CALL, self._seqid)
    args = train_args()
    args.data = data
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

  def recv_train(self):
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = train_result()
    result.read(iprot)
    iprot.readMessageEnd()
    # void return: nothing to propagate on success.
    return

  def getWeight(self):
    self.send_getWeight()
    return self.recv_getWeight()

  def send_getWeight(self):
    self._oprot.writeMessageBegin('getWeight', TMessageType.CALL, self._seqid)
    args = getWeight_args()
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

  def recv_getWeight(self):
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = getWeight_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getWeight failed: unknown result")

  def getBiase(self):
    self.send_getBiase()
    return self.recv_getBiase()

  def send_getBiase(self):
    self._oprot.writeMessageBegin('getBiase', TMessageType.CALL, self._seqid)
    args = getBiase_args()
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

  def recv_getBiase(self):
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = getBiase_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getBiase failed: unknown result")

  def save(self):
    self.send_save()
    self.recv_save()

  def send_save(self):
    self._oprot.writeMessageBegin('save', TMessageType.CALL, self._seqid)
    args = save_args()
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

  def recv_save(self):
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(iprot)
      iprot.readMessageEnd()
      raise x
    result = save_result()
    result.read(iprot)
    iprot.readMessageEnd()
    return


# Server-side dispatcher: decodes an incoming message, invokes the handler
# method of the same name and writes back a REPLY or EXCEPTION message.
class Processor(Iface, TProcessor):
  def __init__(self, handler):
    self._handler = handler
    self._processMap = {}
    self._processMap["train"] = Processor.process_train
    self._processMap["getWeight"] = Processor.process_getWeight
    self._processMap["getBiase"] = Processor.process_getBiase
    self._processMap["save"] = Processor.process_save

  def process(self, iprot, oprot):
    (name, type, seqid) = iprot.readMessageBegin()
    if name not in self._processMap:
      # Unknown method: consume the struct and answer with an exception.
      iprot.skip(TType.STRUCT)
      iprot.readMessageEnd()
      x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
      oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
      x.write(oprot)
      oprot.writeMessageEnd()
      oprot.trans.flush()
      return
    else:
      self._processMap[name](self, seqid, iprot, oprot)
    return True

  def process_train(self, seqid, iprot, oprot):
    args = train_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = train_result()
    try:
      self._handler.train(args.data)
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      # Transport failures and shutdown signals propagate to the server loop.
      raise
    except Exception as ex:
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("train", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()

  def process_getWeight(self, seqid, iprot, oprot):
    args = getWeight_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getWeight_result()
    try:
      result.success = self._handler.getWeight()
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      raise
    except Exception as ex:
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getWeight", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()

  def process_getBiase(self, seqid, iprot, oprot):
    args = getBiase_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getBiase_result()
    try:
      result.success = self._handler.getBiase()
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      raise
    except Exception as ex:
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getBiase", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()

  def process_save(self, seqid, iprot, oprot):
    args = save_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = save_result()
    try:
      self._handler.save()
      msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
      raise
    except Exception as ex:
      msg_type = TMessageType.EXCEPTION
      logging.exception(ex)
      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("save", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()


# HELPER FUNCTIONS AND STRUCTURES

# Argument struct for train(); `data` is a nested list of doubles (see
# thrift_spec below for the exact nesting).
class train_args:
  """
  Attributes:
   - data
  """

  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'data', (TType.LIST,(TType.LIST,(TType.DOUBLE,None))), None, ), # 1
  )

  def __init__(self, data=None,):
    self.data = data

  def read(self, iprot):
    # Fast path: C-accelerated decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          self.data = []
          (_etype3, _size0) = iprot.readListBegin()
          for _i4 in xrange(_size0):
            _elem5 = []
            (_etype9, _size6) = iprot.readListBegin()
            for _i10 in xrange(_size6):
              _elem11 = []
              (_etype15, _size12) = iprot.readListBegin()
              for _i16 in xrange(_size12):
                _elem17 = iprot.readDouble()
                _elem11.append(_elem17)
              iprot.readListEnd()
              _elem5.append(_elem11)
            iprot.readListEnd()
            self.data.append(_elem5)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: C-accelerated encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('train_args')
    if self.data is not None:
      oprot.writeFieldBegin('data', TType.LIST, 1)
      oprot.writeListBegin(TType.LIST, len(self.data))
      for iter18 in self.data:
        oprot.writeListBegin(TType.LIST, len(iter18))
        for iter19 in iter18:
          oprot.writeListBegin(TType.DOUBLE, len(iter19))
          for iter20 in iter19:
            oprot.writeDouble(iter20)
          oprot.writeListEnd()
        oprot.writeListEnd()
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.data)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)


# Empty result struct for the void train() call.
class train_result:

  thrift_spec = (
  )

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('train_result')
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __hash__(self):
    value = 17
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)


# Empty argument struct for getWeight().
class getWeight_args:

  thrift_spec = (
  )

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getWeight_args')
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __hash__(self):
    value = 17
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)


# Result struct for getWeight(); `success` mirrors train_args.data's nesting.
class getWeight_result:
  """
  Attributes:
   - success
  """

  thrift_spec = (
    (0, TType.LIST, 'success', (TType.LIST,(TType.LIST,(TType.DOUBLE,None))), None, ), # 0
  )

  def __init__(self, success=None,):
    self.success = success

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          self.success = []
          (_etype24, _size21) = iprot.readListBegin()
          for _i25 in xrange(_size21):
            _elem26 = []
            (_etype30, _size27) = iprot.readListBegin()
            for _i31 in xrange(_size27):
              _elem32 = []
              (_etype36, _size33) = iprot.readListBegin()
              for _i37 in xrange(_size33):
                _elem38 = iprot.readDouble()
                _elem32.append(_elem38)
              iprot.readListEnd()
              _elem26.append(_elem32)
            iprot.readListEnd()
            self.success.append(_elem26)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getWeight_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.LIST, len(self.success))
      for iter39 in self.success:
        oprot.writeListBegin(TType.LIST, len(iter39))
        for iter40 in iter39:
          oprot.writeListBegin(TType.DOUBLE, len(iter40))
          for iter41 in iter40:
            oprot.writeDouble(iter41)
          oprot.writeListEnd()
        oprot.writeListEnd()
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.success)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)


# Empty argument struct for getBiase().
# NOTE(review): this chunk of the generated file is truncated below —
# getBiase_args.write() continues past the visible excerpt.
class getBiase_args:

  thrift_spec = (
  )

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec
def plot_2d_graph(self, data_frame, gp, chart_type):
    """Render `data_frame` as a 2D matplotlib chart.

    Parameters
    ----------
    data_frame : pandas.DataFrame
        One column per series; the index supplies the x-axis.
    gp : GraphProperties or None
        Styling/behavior options; a default GraphProperties is used when None.
    chart_type : str or None
        'line', 'bar', 'stacked', 'scatter' or 'heatmap'; gp.chart_type
        (scalar or per-column list) takes precedence when set.

    Side effects: draws/saves/shows the figure depending on gp flags; the
    optional mpld3/bokeh/plotly conversions are best-effort and swallowed
    on failure (original design choice, preserved here).
    """
    if gp is None: gp = GraphProperties()
    if gp.chart_type is None and chart_type is None: chart_type = 'line'
    if gp.resample is not None: data_frame = data_frame.asfreq(gp.resample)
    self.apply_style_sheet(gp)
    # create figure & add a subplot
    fig = plt.figure(figsize = ((gp.width * gp.scale_factor)/gp.dpi,
                                (gp.height * gp.scale_factor)/gp.dpi), dpi = gp.dpi)
    ax = fig.add_subplot(111)
    if gp.x_title != '': ax.set_xlabel(gp.x_title)
    if gp.y_title != '': ax.set_ylabel(gp.y_title)
    plt.xlabel(gp.x_title)
    plt.ylabel(gp.y_title)
    fig.suptitle(gp.title, fontsize = 14 * gp.scale_factor)
    # format Y axis without scientific-notation offsets
    y_formatter = matplotlib.ticker.ScalarFormatter(useOffset = False)
    ax.yaxis.set_major_formatter(y_formatter)
    # create a second y axis if necessary
    ax2 = []
    if gp.y_axis_2_series != []:
        ax2 = ax.twinx()
        # do not use a grid with multiple y axes
        ax.yaxis.grid(False)
        ax2.yaxis.grid(False)
    # matplotlib 1.5+ exposes the color cycle via axes.prop_cycle
    try:
        cyc = matplotlib.rcParams['axes.prop_cycle']
        color_cycle = [x['color'] for x in cyc]
    except KeyError:
        # pre 1.5 — NOTE(review): color_cycle stays unbound on this path;
        # any later use raises NameError (swallowed by the broad except below).
        pass
        # color_cycle = matplotlib.rcParams['axes.color_cycle']
    bar_ind = np.arange(0, len(data_frame.index))
    # for bar charts, create a proxy x-axis (then relabel)
    xd, bar_ind, has_bar, no_of_bars = self.get_bar_indices(data_frame, gp, chart_type, bar_ind)
    # plot the lines (using custom palettes as appropriate)
    try:
        # get all the correct colors (and construct gradients if necessary eg. from 'blues')
        color_spec = self.create_color_list(gp, data_frame)
        # running totals used as the 'bottom' of stacked bars
        yoff_pos = np.zeros(len(data_frame.index.values))
        yoff_neg = np.zeros(len(data_frame.index.values))
        zeros = np.zeros(len(data_frame.index.values))
        # bar geometry
        bar_space = 0.2
        bar_width = (1 - bar_space) / (no_of_bars)
        bar_index = 0
        has_matrix = False
        for i in range(0, len(data_frame.columns.values)):
            # per-column chart type override
            if gp.chart_type is not None:
                if isinstance(gp.chart_type, list):
                    chart_type = gp.chart_type[i]
                else:
                    chart_type = gp.chart_type
            if chart_type == 'heatmap':
                # TODO experimental!
                ax.pcolor(data_frame, cmap=plt.cm.Blues, alpha=0.8)
                has_matrix = True
                break
            label = str(data_frame.columns[i])
            ax_temp = self.get_axis(ax, ax2, label, gp.y_axis_2_series)
            # .ix is deprecated in modern pandas; kept for the pandas
            # versions this adapter targets
            yd = data_frame.ix[:,i]
            if color_spec[i] is None:
                color_spec[i] = color_cycle[i % len(color_cycle)]
            if (chart_type == 'line'):
                linewidth_t = self.get_linewidth(label, gp.linewidth, gp.linewidth_2, gp.linewidth_2_series)
                if linewidth_t is None: linewidth_t = matplotlib.rcParams['axes.linewidth']
                ax_temp.plot(xd, yd, label = label, color = color_spec[i], linewidth = linewidth_t)
            elif(chart_type == 'bar'):
                # for multiple bars we need to allocate space properly
                bar_pos = [k - (1 - bar_space) / 2. + bar_index * bar_width for k in range(0,len(bar_ind))]
                ax_temp.bar(bar_pos, yd, bar_width, label = label, color = color_spec[i])
                bar_index = bar_index + 1
            elif(chart_type == 'stacked'):
                bar_pos = [k - (1 - bar_space) / 2. + bar_index * bar_width for k in range(0,len(bar_ind))]
                # stack positives above zero and negatives below it
                yoff = np.where(yd > 0, yoff_pos, yoff_neg)
                ax_temp.bar(bar_pos, yd, label = label, color = color_spec[i], bottom = yoff)
                yoff_pos = yoff_pos + np.maximum(yd, zeros)
                yoff_neg = yoff_neg + np.minimum(yd, zeros)
            elif(chart_type == 'scatter'):
                ax_temp.scatter(xd, yd, label = label, color = color_spec[i])
                if gp.line_of_best_fit is True:
                    self.trendline(ax_temp, xd.values, yd.values, order=1, color= color_spec[i],
                                   alpha=1, scale_factor = gp.scale_factor)
        # format X axis
        self.format_x_axis(ax, data_frame, gp, has_bar, bar_ind, has_matrix)
    except:
        pass
    if gp.display_source_label == True:
        ax.annotate('Source: ' + gp.source, xy = (1, 0), xycoords='axes fraction',
                    fontsize=7 * gp.scale_factor,
                    xytext=(-5 * gp.scale_factor, 10 * gp.scale_factor),
                    textcoords='offset points', ha='right', va='top', color = gp.source_color)
    if gp.display_brand_label == True:
        self.create_brand_label(ax, anno = gp.brand_label, scale_factor = gp.scale_factor)
    leg = []
    leg2 = []
    loc = 'best'
    # if we have two y-axis then make sure legends are in opposite corners
    if ax2 != []: loc = 2
    try:
        leg = ax.legend(loc = loc, prop={'size':10 * gp.scale_factor})
        leg.get_frame().set_linewidth(0.0)
        leg.get_frame().set_alpha(0)
        if ax2 != []:
            leg2 = ax2.legend(loc = 1, prop={'size':10 * gp.scale_factor})
            leg2.get_frame().set_linewidth(0.0)
            leg2.get_frame().set_alpha(0)
    except:
        pass
    try:
        if gp.display_legend is False:
            if leg != []: leg.remove()
            # BUGFIX: original removed `leg` twice; the second-axis legend
            # `leg2` was never removed.
            if leg2 != []: leg2.remove()
    except:
        pass
    try:
        plt.savefig(gp.file_output, transparent=False)
    except:
        pass
    ####### various matplotlib converters are unstable
    # convert to D3 format with mpld3
    try:
        # output matplotlib charts externally to D3 based libraries
        import mpld3
        if gp.display_mpld3 == True:
            mpld3.save_d3_html(fig, gp.html_file_output)
            mpld3.show(fig)
    except:
        pass
    # FRAGILE! convert to Bokeh format
    # better to use direct Bokeh renderer
    try:
        if (gp.convert_matplotlib_to_bokeh == True):
            from bokeh.plotting import output_file, show
            from bokeh import mpl
            output_file(gp.html_file_output)
            show(mpl.to_bokeh())
    except:
        pass
    # FRAGILE! convert matplotlib chart to Plotly format
    # recommend using AdapterCufflinks instead to directly plot to Plotly
    try:
        import plotly.plotly as py
        import plotly
        import plotly.tools as tls
        if gp.convert_matplotlib_to_plotly == True:
            plotly.tools.set_credentials_file(username = gp.plotly_username,
                                              api_key = gp.plotly_api_key)
            py_fig = tls.mpl_to_plotly(fig, strip_style = True)
            plot_url = py.plot_mpl(py_fig, filename = gp.plotly_url)
    except:
        pass
    # display in matplotlib window
    try:
        if Constants.plotfactory_silent_display == True:
            pass
        elif gp.silent_display == False:
            plt.show()
    except:
        pass
def apply_style_sheet(self, gp):
    """Reset matplotlib rcParams to defaults, then apply the style sheet
    named by gp.style_sheet (PyThalesians registry first, falling back to
    matplotlib's own styles) and rescale the base font by gp.scale_factor."""
    matplotlib.rcdefaults()

    # Prefer a PyThalesians-registered sheet; on any failure fall back to
    # letting matplotlib resolve the name itself.
    try:
        sheet_path = Constants().plotfactory_pythalesians_style_sheet[gp.style_sheet]
        plt.style.use(sheet_path)
    except:
        plt.style.use(gp.style_sheet)

    # Scale the default font size for hi-DPI output.
    scaled_font_size = matplotlib.rcParams['font.size'] * gp.scale_factor
    matplotlib.rcParams.update({'font.size': scaled_font_size})

    # Do not use offsets/scientific notation on axes.
    matplotlib.rcParams.update({'axes.formatter.useoffset': False})
plt.gcf().subplots_adjust(bottom=5) import matplotlib.dates as mdates if gp.date_formatter is not None: ax.format_xdata = mdates.DateFormatter(gp.date_formatter) plt.tight_layout() # ax.tick_params(axis='x', labelsize=matplotlib.rcParams['font.size'] * 0.5) return # format X axis dates = data_frame.index # scaling for time series plots with hours and minutes only (and no dates) if hasattr(data_frame.index[0], 'hour') and not(hasattr(data_frame.index[0], 'month')): ax.xaxis.set_major_locator(MultipleLocator(86400./3.)) ax.xaxis.set_minor_locator(MultipleLocator(86400./24.)) ax.grid(b = True, which='minor', color='w', linewidth=0.5) # TODO have more refined way of formating time series x-axis! # scaling for time series plots with dates too else: # to handle dates try: dates = dates.to_pydatetime() diff = data_frame.index[-1] - data_frame.index[0] import matplotlib.dates as md if gp.date_formatter is not None: ax.xaxis.set_major_formatter(md.DateFormatter(gp.date_formatter)) elif diff < timedelta(days = 4): # class MyFormatter(Formatter): # def __init__(self, dates, fmt='%H:%M'): # self.dates = dates # self.fmt = fmt # # def __call__(self, x, pos=0): # 'Return the label for time x at position pos' # ind = int(round(x)) # if ind >= len(self.dates) or ind < 0: return '' # # return self.dates[ind].strftime(self.fmt) # # formatter = MyFormatter(dates) # ax.xaxis.set_major_formatter(formatter) date_formatter = '%H:%M' xfmt = md.DateFormatter(date_formatter) ax.xaxis.set_major_formatter(xfmt) if diff < timedelta(minutes=20): ax.xaxis.set_major_locator(MinuteLocator(byminute=range(60), interval=2)) ax.xaxis.set_minor_locator(MinuteLocator(interval=1)) elif diff < timedelta(hours=1): ax.xaxis.set_major_locator(MinuteLocator(byminute=range(60), interval=5)) ax.xaxis.set_minor_locator(MinuteLocator(interval=2)) elif diff < timedelta(hours=6): locator = HourLocator(interval=1) ax.xaxis.set_major_locator(locator) ax.xaxis.set_minor_locator(MinuteLocator(interval=30)) elif diff < 
timedelta(days=3): ax.xaxis.set_major_locator(HourLocator(interval=6)) ax.xaxis.set_minor_locator(HourLocator(interval=1)) elif diff < timedelta(days=10): locator = DayLocator(interval=2) ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(md.DateFormatter('%d %b %y')) day_locator = DayLocator(interval=1) ax.xaxis.set_minor_locator(day_locator) elif diff < timedelta(days=40): locator = DayLocator(interval=10) ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(md.DateFormatter('%d %b %y')) day_locator = DayLocator(interval=1) ax.xaxis.set_minor_locator(day_locator) elif diff < timedelta(days=365 * 0.5): locator = MonthLocator(bymonthday=1, interval=2) ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(md.DateFormatter('%b %y')) months_locator = MonthLocator(interval=1) ax.xaxis.set_minor_locator(months_locator) elif diff < timedelta(days=365 * 2): locator = MonthLocator(bymonthday=1, interval=3) ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(md.DateFormatter('%b %y')) months_locator = MonthLocator(interval=1) ax.xaxis.set_minor_locator(months_locator) elif
def countloc(aa):
    """Count per-location availability and assignment.

    Parameters
    ----------
    aa : table-like (e.g. astropy Table / pandas DataFrame) with integer
        column 'LOCATION_AVAIL' (location id per row) and 0/1 column
        'LOCATION_ASSIGNED'.

    Returns
    -------
    (nl, nla) : two float arrays of length max(LOCATION_AVAIL)+1, where
        nl[i] is the number of rows listing location i as available and
        nla[i] is how many of those rows were assigned.
    """
    locs = np.asarray(aa['LOCATION_AVAIL'])
    locsa = np.asarray(aa['LOCATION_ASSIGNED'])
    la = np.max(locs) + 1
    # np.bincount replaces the original per-row Python loop — these catalogs
    # can contain millions of rows, so do the counting in C. Results are
    # identical (float arrays, same totals per location id).
    nl = np.bincount(locs, minlength=la).astype(float)
    nla = np.bincount(locs, weights=locsa, minlength=la)
    return nl, nla
#tarsn.keep_columns(['RA','DEC','TARGETID''LOCATION','FIBER','TILEID']) s = 1 tdone = np.unique(fgu['TILEID']) tmask = ~np.isin(tiles['TILEID'],tdone) else: tmask = np.ones(len(tiles)).astype('bool') for tile in tiles[tmask]['TILEID']: ffa = randir+str(rann)+'/fba-'+str(tile).zfill(6)+'.fits' ffna = randir+str(rann)+'/tilenofa-'+str(tile)+'.fits' if os.path.isfile(ffa): fa = Table.read(ffa,hdu='FAVAIL') ffna = Table.read(ffna) fgun = join(fa,ffna,keys=['TARGETID']) #fgun.remove_columns(delcols) td += 1 fgun['TILEID'] = int(tile) fgun.keep_columns(['RA','DEC','TARGETID','LOCATION','FIBER','TILEID']) if s == 0: fgu = fgun s = 1 else: fgu = vstack([fgu,fgun],metadata_conflicts='silent') fgu.sort('TARGETID') print(tile,td, len(tiles), len(fgun),len(fgu)) else: print('did not find '+ffa) if len(tiles[tmask]['TILEID']) > 0: fgu.write(outf,format='fits', overwrite=True) #specf = Table.read(lspecdir+'datcomb_'+tp+'_spec_zdone.fits') specf['TILELOCID'] = 10000*specf['TILEID'] +specf['LOCATION'] specf.keep_columns(keepcols) #specf.keep_columns(['ZWARN','LOCATION','TILEID','TILELOCID','FIBERSTATUS','FIBERASSIGN_X','FIBERASSIGN_Y','PRIORITY','DELTA_X','DELTA_Y','EXPTIME','PSF_TO_FIBER_SPECFLUX','TSNR2_ELG_B','TSNR2_LYA_B','TSNR2_BGS_B','TSNR2_QSO_B','TSNR2_LRG_B','TSNR2_ELG_R','TSNR2_LYA_R','TSNR2_BGS_R','TSNR2_QSO_R','TSNR2_LRG_R','TSNR2_ELG_Z','TSNR2_LYA_Z','TSNR2_BGS_Z','TSNR2_QSO_Z','TSNR2_LRG_Z','TSNR2_ELG','TSNR2_LYA','TSNR2_BGS','TSNR2_QSO','TSNR2_LRG']) fgu = join(fgu,specf,keys=['LOCATION','TILEID','FIBER'],join_type='left') fgu.sort('TARGETID') outf = lspecdir+'/rancomb_'+str(rann)+tp+'wdupspec_zdone.fits' print(outf) fgu.write(outf,format='fits', overwrite=True) def combran(tiles,rann,randir,ddir,tp,tmask,tc='SV3_DESI_TARGET',imask=False): s = 0 td = 0 #tiles.sort('ZDATE') print(len(tiles)) delcols = ['DESI_TARGET','BGS_TARGET','MWS_TARGET','SUBPRIORITY','OBSCONDITIONS','PRIORITY_INIT',\ 
def combran(tiles,rann,randir,ddir,tp,tmask,tc='SV3_DESI_TARGET',imask=False):
    """Combine per-tile random catalogs (realization `rann`) into a single
    unique-TARGETID catalog, tracking for each random which tiles cover it,
    whether its fiber location could have been assigned (ZPOSS), and the
    best TSNR2 values across duplicate observations.

    Writes randir+rann+'/rancomb_<tp>_Alltiles.fits'.

    Parameters: `tiles` needs TILEID and ZDATE columns; `tmask` maps target
    type -> target bitmask (used when tp is not 'dark'/'bright'); `tc` is
    the targeting bit column; imask=True drops randoms with MASKBITS != 0.
    """
    s = 0      # 0 until fgu is seeded by the first tile
    td = 0     # tiles processed so far
    print(len(tiles))
    delcols = ['DESI_TARGET','BGS_TARGET','MWS_TARGET','SUBPRIORITY','OBSCONDITIONS','PRIORITY_INIT',\
    'NUMOBS_INIT','SCND_TARGET','NUMOBS_MORE','NUMOBS','Z','ZWARN','TARGET_STATE','TIMESTAMP','VERSION','PRIORITY']
    for tile,zdate in zip(tiles['TILEID'],tiles['ZDATE']):
        # per-tile fibermap + scores, and the list of good fiber locations
        tspec = combfibmap_and_scores(tile,zdate)
        pdict,gloc = goodlocdict(tspec)
        tspec.keep_columns(['LOCATION','FIBERSTATUS','DELTA_X','DELTA_Y','PSF_TO_FIBER_SPECFLUX','EXPTIME','OBJTYPE','TSNR2_ELG','TSNR2_LRG','TSNR2_QSO','TSNR2_BGS'])
        dt = ddir+'ALL'+str(tile)+'_full.dat.fits'
        ffa = randir+str(rann)+'/fba-'+str(tile).zfill(6)+'.fits'
        ffna = randir+str(rann)+'/tilenofa-'+str(tile)+'.fits'
        if os.path.isfile(ffa):
            fd = Table.read(dt)
            # cut the data catalog to the requested target type
            if tp != 'dark' and tp != 'bright':
                wt = (fd[tc] & tmask[tp]) > 0
                fd = fd[wt]
            # per-location counts of available/assigned data targets
            nl,nla = countloc(fd)
            # randoms available on this tile, restricted to good locations
            fa = Table.read(ffa,hdu='FAVAIL')
            wg = np.isin(fa['LOCATION'],gloc)
            fa = fa[wg]
            fa = join(fa,tspec,keys=['LOCATION'],join_type='left')
            fa['ZPOSS'] = np.zeros(len(fa)).astype(int)
            if tp != 'dark' and tp != 'bright':
                # locations that had available targets of this type but none
                # assigned — treated as blocked for this type
                locsna = []
                for i in range(0,len(nla)):
                    if nla[i] == 0 and nl[i] > 0:
                        locsna.append(i)
                print('number of unassigned locations',len(locsna))
                ntloc = len(gloc)-len(locsna)
                print('total number of assignable positions',ntloc)
                was = ~np.isin(fa['LOCATION'],locsna)
                # ZPOSS=1: a redshift would have been possible at this location
                fa['ZPOSS'][was] = 1
            # one row per random target on this tile
            fgun = unique(fa,keys=['TARGETID'],keep='last')
            ffna = Table.read(ffna)
            fgun = join(fgun,ffna,keys=['TARGETID'])
            fgun.remove_columns(delcols)
            if imask:
                wm = fgun['MASKBITS'] == 0
                fgun = fgun[wm]
            print(tile,td, len(tiles), str(len(fgun))+' unique new randoms')
            td += 1
            aa = np.chararray(len(fgun),unicode=True,itemsize=100)
            aa[:] = str(tile)
            fgun['TILE'] = int(tile)
            fgun['TILES'] = aa
            fgun['TILELOCID'] = 10000*tile +fgun['LOCATION']
            if s == 0:
                fgu = fgun
                s = 1
            else:
                # merge into the running catalog, de-duplicating on TARGETID
                fv = vstack([fgu,fgun],metadata_conflicts='silent')
                fgo = fgu.copy()
                fgu = unique(fv,keys='TARGETID')#,keep='last')
                # rows in the new tile whose TARGETID already existed
                dids = np.isin(fgun['TARGETID'],fgo['TARGETID'])
                # matching rows in the concatenated/unique table
                didsc = np.isin(fgu['TARGETID'],fgun['TARGETID'][dids])
                # give repeats the new tilelocids (most likely to be
                # available to low-priority targets) and keep the best TSNR2
                fgu['TILELOCID'][didsc] = fgun['TILELOCID'][dids]
                fgu['TSNR2_ELG'][didsc] = np.maximum(fgu['TSNR2_ELG'][didsc],fgun['TSNR2_ELG'][dids])
                fgu['TSNR2_QSO'][didsc] = np.maximum(fgu['TSNR2_QSO'][didsc],fgun['TSNR2_QSO'][dids])
                fgu['TSNR2_BGS'][didsc] = np.maximum(fgu['TSNR2_BGS'][didsc],fgun['TSNR2_BGS'][dids])
                fgu['TSNR2_LRG'][didsc] = np.maximum(fgu['TSNR2_LRG'][didsc],fgun['TSNR2_LRG'][dids])
                if tp != 'dark' and tp != 'bright':
                    # a random is "possible" if it was possible on any tile
                    fgu['ZPOSS'][didsc] = np.maximum(fgu['ZPOSS'][didsc],fgun['ZPOSS'][dids])
                # append '-<tile>' to the TILES string of the duplicates
                aa = np.chararray(len(fgu['TILES']),unicode=True,itemsize=20)
                aa[:] = '-'+str(tile)
                ms = np.core.defchararray.add(fgu['TILES'][didsc],aa[didsc])
                fgu['TILES'][didsc] = ms #add the tile info
                print(str(len(fgu))+' unique total randoms')
        else:
            print('did not find '+ffa)
    fu = fgu
    # normalize each TILES string to a sorted, '-'-joined tile list so equal
    # tile sets compare equal regardless of processing order
    fl = np.chararray(len(fu),unicode=True,itemsize=100)
    for ii in range(0,len(fu)):
        tl = fu['TILES'][ii]
        tls = tl.split('-')#.astype('int')
        tli = tls[0]
        if len(tls) > 1:
            tls.sort()
            tli = tls[0]
            for i in range(1,len(tls)):
                tli += '-'+tls[i]
        fl[ii] = tli
    fu['TILES'] = fl
    print('number of unique tiles configurations '+str(len(np.unique(fu['TILES']))))
    NT = np.zeros(len(fgu))
    ros = np.zeros(len(fgu))
    print('counting tiles and finding rosette')
    # NOTE: loop (rather than array ops) needed when reading via Table.read
    for ii in range(0,len(fu['TILES'])):
        NT[ii] = np.char.count(fu['TILES'][ii],'-')+1   # tiles covering this random
        ti = int(fu['TILES'][ii].split('-')[0])
        ros[ii] = tile2rosette(ti)                      # rosette of the first tile
    fu['NTILE'] = NT
    fu['rosette_number'] = ros
    print(np.unique(fu['rosette_number'],return_counts=True))
    fu.write(randir+str(rann)+'/rancomb_'+tp+'_Alltiles.fits',format='fits', overwrite=True)
def mkfullran(indir,rann,imbits,outf,tp,pd,bit,desitarg='SV3_DESI_TARGET',tsnr= 'TSNR2_ELG',notqso='',qsobit=4,fbcol='COADD_FIBERSTATUS'):
    """Build the 'full' random catalog for realization `rann` and target
    type `tp`: keep only randoms at tile/locations that passed the spec
    quality cuts and could have yielded a redshift for this target class,
    attach imaging columns, apply the imaging veto mask, and de-duplicate
    on TARGETID (keeping the highest-`tsnr` observation). Writes `outf`.

    `bit`/`qsobit` select the target class from the `desitarg` bit column;
    notqso='notqso' additionally excludes QSO targets; `imbits` lists the
    imaging maskbits to veto via cutphotmask.
    """
    # good tile/locations from the combined spec data
    zf = indir+'/datcomb_'+pd+'_tarspecwdup_zdone.fits'
    dz = Table.read(zf)
    fs = get_specdat(indir,pd)
    stlid = 10000*fs['TILEID'] +fs['LOCATION']
    gtl = np.unique(stlid)
    # select the target class in the data, cut to good tile/locations
    wtype = ((dz[desitarg] & bit) > 0)
    if notqso == 'notqso':
        wtype &= ((dz[desitarg] & qsobit) == 0)
    wg = np.isin(dz['TILELOCID'],gtl)
    dz = dz[wtype&wg]
    print('length after selecting type and fiberstatus == 0 '+str(len(dz)))
    # tile/locations where a z was not possible for this class
    lznp = find_znotposs(dz)
    # now switch to the randoms and apply the same location vetoes
    zf = indir+'/rancomb_'+str(rann)+pd+'wdupspec_zdone.fits'
    dz = Table.read(zf)
    zfpd = indir+'/rancomb_'+str(rann)+pd+'_Alltilelocinfo.fits'
    dzpd = Table.read(zfpd)
    dz = join(dz,dzpd,keys=['TARGETID'])
    print('length before cutting to good positions '+str(len(dz)))
    wk = ~np.isin(dz['TILELOCID'],lznp)
    wk &= np.isin(dz['TILELOCID'],gtl)
    dz = dz[wk]
    print('length after cutting to good positions '+str(len(dz)))
    # attach imaging columns needed for the veto mask
    dirrt='/global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/'
    tcol = ['TARGETID','MASKBITS','PHOTSYS','NOBS_G','NOBS_R','NOBS_Z'] #only including what are necessary for mask cuts for now
    tarf = fitsio.read(dirrt+'/randoms-1-'+str(rann)+'.fits',columns=tcol)
    dz = join(dz,tarf,keys=['TARGETID'])
    del tarf
    dz = cutphotmask(dz,imbits)
    print('length after cutting to based on imaging veto mask '+str(len(dz)))
    # sort so unique(...,keep='last') retains the highest-tsnr duplicate,
    # matching what is done for the data
    dz.sort(tsnr)
    dz = unique(dz,keys=['TARGETID'],keep='last')
    print('length after cutting to unique TARGETID '+str(len(dz)))
    print(np.unique(dz['NTILE']))
    dz.write(outf,format='fits', overwrite=True)
    del dz
def addcol_ran(fn,rann,dirrt='/global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/',ecol=['TARGETID','EBV','WISEMASK_W1','WISEMASK_W2','BRICKID','PSFDEPTH_G','PSFDEPTH_R','PSFDEPTH_Z','GALDEPTH_G','GALDEPTH_R','GALDEPTH_Z','PSFDEPTH_W1','PSFDEPTH_W2','PSFSIZE_G','PSFSIZE_R','PSFSIZE_Z']):
    """Join the imaging columns `ecol` from the dr9 random file for
    realization `rann` onto the catalog in `fn`, overwriting `fn` in place.
    NOTE(review): ecol is a mutable default argument — harmless here since
    it is only read, never mutated.
    """
    dz = fitsio.read(fn)
    tarf = fitsio.read(dirrt+'/randoms-1-'+str(rann)+'.fits',columns=ecol)
    # join on TARGETID; result is a Table with the extra columns attached
    dz = join(dz,tarf,keys=['TARGETID'])
    dz.write(fn,format='fits', overwrite=True)
    # release the (potentially large) table promptly
    del dz
# dz = Table.read(zf) # selz = dz['ZWARN_MTL'] != 999999 # fs = dz[selz] #
>= 0) m.c5677 = Constraint(expr= m.x6650 - 5*m.b7082 >= 0) m.c5678 = Constraint(expr= m.x6651 - 25*m.b7086 >= 0) m.c5679 = Constraint(expr= m.x6652 - 5*m.b7085 >= 0) m.c5680 = Constraint(expr= m.x6653 - 25*m.b7086 >= 0) m.c5681 = Constraint(expr= m.x6654 - 30*m.b7090 >= 0) m.c5682 = Constraint(expr= m.x6655 - 25*m.b7089 >= 0) m.c5683 = Constraint(expr= m.x6656 - 30*m.b7090 >= 0) m.c5684 = Constraint(expr= m.x6657 - 5*m.b7088 >= 0) m.c5685 = Constraint(expr= m.x6658 - 25*m.b7089 >= 0) m.c5686 = Constraint(expr= m.x6659 - 30*m.b7090 >= 0) m.c5687 = Constraint(expr= m.x6660 - 30*m.b7093 >= 0) m.c5688 = Constraint(expr= m.x6661 - 25*m.b7092 >= 0) m.c5689 = Constraint(expr= m.x6662 - 30*m.b7093 >= 0) m.c5690 = Constraint(expr= m.x6663 - 5*m.b7091 >= 0) m.c5691 = Constraint(expr= m.x6664 - 25*m.b7092 >= 0) m.c5692 = Constraint(expr= m.x6665 - 30*m.b7093 >= 0) m.c5693 = Constraint(expr= m.x6666 - 5*m.b7094 >= 0) m.c5694 = Constraint(expr= m.x6667 - 25*m.b7098 >= 0) m.c5695 = Constraint(expr= m.x6668 - 5*m.b7097 >= 0) m.c5696 = Constraint(expr= m.x6669 - 25*m.b7098 >= 0) m.c5697 = Constraint(expr= m.x6670 - 30*m.b7102 >= 0) m.c5698 = Constraint(expr= m.x6671 - 25*m.b7101 >= 0) m.c5699 = Constraint(expr= m.x6672 - 30*m.b7102 >= 0) m.c5700 = Constraint(expr= m.x6673 - 5*m.b7100 >= 0) m.c5701 = Constraint(expr= m.x6674 - 25*m.b7101 >= 0) m.c5702 = Constraint(expr= m.x6675 - 30*m.b7102 >= 0) m.c5703 = Constraint(expr= m.x6676 - 30*m.b7105 >= 0) m.c5704 = Constraint(expr= m.x6677 - 25*m.b7104 >= 0) m.c5705 = Constraint(expr= m.x6678 - 30*m.b7105 >= 0) m.c5706 = Constraint(expr= m.x6679 - 5*m.b7103 >= 0) m.c5707 = Constraint(expr= m.x6680 - 25*m.b7104 >= 0) m.c5708 = Constraint(expr= m.x6681 - 30*m.b7105 >= 0) m.c5709 = Constraint(expr= m.x6682 - 5*m.b7106 >= 0) m.c5710 = Constraint(expr= m.x6683 - 25*m.b7110 >= 0) m.c5711 = Constraint(expr= m.x6684 - 5*m.b7109 >= 0) m.c5712 = Constraint(expr= m.x6685 - 25*m.b7110 >= 0) m.c5713 = Constraint(expr= m.x6686 - 30*m.b7114 >= 0) 
m.c5714 = Constraint(expr= m.x6687 - 25*m.b7113 >= 0) m.c5715 = Constraint(expr= m.x6688 - 30*m.b7114 >= 0) m.c5716 = Constraint(expr= m.x6689 - 5*m.b7112 >= 0) m.c5717 = Constraint(expr= m.x6690 - 25*m.b7113 >= 0) m.c5718 = Constraint(expr= m.x6691 - 30*m.b7114 >= 0) m.c5719 = Constraint(expr= m.x6692 - 30*m.b7117 >= 0) m.c5720 = Constraint(expr= m.x6693 - 25*m.b7116 >= 0) m.c5721 = Constraint(expr= m.x6694 - 30*m.b7117 >= 0) m.c5722 = Constraint(expr= m.x6695 - 5*m.b7115 >= 0) m.c5723 = Constraint(expr= m.x6696 - 25*m.b7116 >= 0) m.c5724 = Constraint(expr= m.x6697 - 30*m.b7117 >= 0) m.c5725 = Constraint(expr= m.x6698 - 5*m.b7118 >= 0) m.c5726 = Constraint(expr= m.x6699 - 25*m.b7122 >= 0) m.c5727 = Constraint(expr= m.x6700 - 5*m.b7121 >= 0) m.c5728 = Constraint(expr= m.x6701 - 25*m.b7122 >= 0) m.c5729 = Constraint(expr= m.x6702 - 30*m.b7126 >= 0) m.c5730 = Constraint(expr= m.x6703 - 25*m.b7125 >= 0) m.c5731 = Constraint(expr= m.x6704 - 30*m.b7126 >= 0) m.c5732 = Constraint(expr= m.x6705 - 5*m.b7124 >= 0) m.c5733 = Constraint(expr= m.x6706 - 25*m.b7125 >= 0) m.c5734 = Constraint(expr= m.x6707 - 30*m.b7126 >= 0) m.c5735 = Constraint(expr= m.x6708 - 30*m.b7129 >= 0) m.c5736 = Constraint(expr= m.x6709 - 25*m.b7128 >= 0) m.c5737 = Constraint(expr= m.x6710 - 30*m.b7129 >= 0) m.c5738 = Constraint(expr= m.x6711 - 5*m.b7127 >= 0) m.c5739 = Constraint(expr= m.x6712 - 25*m.b7128 >= 0) m.c5740 = Constraint(expr= m.x6713 - 30*m.b7129 >= 0) m.c5741 = Constraint(expr= m.x6714 - 5*m.b7130 >= 0) m.c5742 = Constraint(expr= m.x6715 - 25*m.b7134 >= 0) m.c5743 = Constraint(expr= m.x6716 - 5*m.b7133 >= 0) m.c5744 = Constraint(expr= m.x6717 - 25*m.b7134 >= 0) m.c5745 = Constraint(expr= m.x6718 - 30*m.b7138 >= 0) m.c5746 = Constraint(expr= m.x6719 - 25*m.b7137 >= 0) m.c5747 = Constraint(expr= m.x6720 - 30*m.b7138 >= 0) m.c5748 = Constraint(expr= m.x6721 - 5*m.b7136 >= 0) m.c5749 = Constraint(expr= m.x6722 - 25*m.b7137 >= 0) m.c5750 = Constraint(expr= m.x6723 - 30*m.b7138 >= 0) m.c5751 = 
Constraint(expr= m.x6724 - 30*m.b7141 >= 0) m.c5752 = Constraint(expr= m.x6725 - 25*m.b7140 >= 0) m.c5753 = Constraint(expr= m.x6726 - 30*m.b7141 >= 0) m.c5754 = Constraint(expr= m.x6727 - 5*m.b7139 >= 0) m.c5755 = Constraint(expr= m.x6728 - 25*m.b7140 >= 0) m.c5756 = Constraint(expr= m.x6729 - 30*m.b7141 >= 0) m.c5757 = Constraint(expr= m.x6730 - 5*m.b7142 >= 0) m.c5758 = Constraint(expr= m.x6731 - 25*m.b7146 >= 0) m.c5759 = Constraint(expr= m.x6732 - 5*m.b7145 >= 0) m.c5760 = Constraint(expr= m.x6733 - 25*m.b7146 >= 0) m.c5761 = Constraint(expr= m.x6734 - 30*m.b7150 >= 0) m.c5762 = Constraint(expr= m.x6735 - 25*m.b7149 >= 0) m.c5763 = Constraint(expr= m.x6736 - 30*m.b7150 >= 0) m.c5764 = Constraint(expr= m.x6737 - 5*m.b7148 >= 0) m.c5765 = Constraint(expr= m.x6738 - 25*m.b7149 >= 0) m.c5766 = Constraint(expr= m.x6739 - 30*m.b7150 >= 0) m.c5767 = Constraint(expr= m.x6740 - 30*m.b7153 >= 0) m.c5768 = Constraint(expr= m.x6741 - 25*m.b7152 >= 0) m.c5769 = Constraint(expr= m.x6742 - 30*m.b7153 >= 0) m.c5770 = Constraint(expr= m.x6743 - 5*m.b7151 >= 0) m.c5771 = Constraint(expr= m.x6744 - 25*m.b7152 >= 0) m.c5772 = Constraint(expr= m.x6745 - 30*m.b7153 >= 0) m.c5773 = Constraint(expr= m.x6746 - 5*m.b7154 >= 0) m.c5774 = Constraint(expr= m.x6747 - 25*m.b7158 >= 0) m.c5775 = Constraint(expr= m.x6748 - 5*m.b7157 >= 0) m.c5776 = Constraint(expr= m.x6749 - 25*m.b7158 >= 0) m.c5777 = Constraint(expr= m.x6750 - 30*m.b7162 >= 0) m.c5778 = Constraint(expr= m.x6751 - 25*m.b7161 >= 0) m.c5779 = Constraint(expr= m.x6752 - 30*m.b7162 >= 0) m.c5780 = Constraint(expr= m.x6753 - 5*m.b7160 >= 0) m.c5781 = Constraint(expr= m.x6754 - 25*m.b7161 >= 0) m.c5782 = Constraint(expr= m.x6755 - 30*m.b7162 >= 0) m.c5783 = Constraint(expr= m.x6756 - 30*m.b7165 >= 0) m.c5784 = Constraint(expr= m.x6757 - 25*m.b7164 >= 0) m.c5785 = Constraint(expr= m.x6758 - 30*m.b7165 >= 0) m.c5786 = Constraint(expr= m.x6759 - 5*m.b7163 >= 0) m.c5787 = Constraint(expr= m.x6760 - 25*m.b7164 >= 0) m.c5788 = 
Constraint(expr= m.x6761 - 30*m.b7165 >= 0) m.c5789 = Constraint(expr= m.x6762 - 2.5*m.b7166 >= 0) m.c5790 = Constraint(expr= m.x6763 - 12.5*m.b7170 >= 0) m.c5791 = Constraint(expr= m.x6764 - 2.5*m.b7169 >= 0) m.c5792 = Constraint(expr= m.x6765 - 12.5*m.b7170 >= 0) m.c5793 = Constraint(expr= m.x6766 - 15*m.b7174 >= 0) m.c5794 = Constraint(expr= m.x6767 - 12.5*m.b7173 >= 0) m.c5795 = Constraint(expr= m.x6768 - 15*m.b7174 >= 0) m.c5796 = Constraint(expr= m.x6769 - 2.5*m.b7172 >= 0) m.c5797 = Constraint(expr= m.x6770 - 12.5*m.b7173 >= 0) m.c5798 = Constraint(expr= m.x6771 - 15*m.b7174 >= 0) m.c5799 = Constraint(expr= m.x6772 - 15*m.b7177 >= 0) m.c5800 = Constraint(expr= m.x6773 - 12.5*m.b7176 >= 0) m.c5801 = Constraint(expr= m.x6774 - 15*m.b7177 >= 0) m.c5802 = Constraint(expr= m.x6775 - 2.5*m.b7175 >= 0) m.c5803 = Constraint(expr= m.x6776 - 12.5*m.b7176 >= 0) m.c5804 = Constraint(expr= m.x6777 - 15*m.b7177 >= 0) m.c5805 = Constraint(expr= - m.b7018 + m.b7058 + m.b7059 + m.b7060 == 0) m.c5806 = Constraint(expr= - m.b7019 + m.b7061 + m.b7062 + m.b7063 == 0) m.c5807 = Constraint(expr= - m.b7020 + m.b7064 + m.b7065 + m.b7066 == 0) m.c5808 = Constraint(expr= - m.b7021 + m.b7067 + m.b7068 + m.b7069 == 0) m.c5809 = Constraint(expr= - m.b7022 + m.b7070 + m.b7071 + m.b7072 == 0) m.c5810 = Constraint(expr= - m.b7023 + m.b7073 + m.b7074 + m.b7075 == 0) m.c5811 = Constraint(expr= - m.b7024 + m.b7076 + m.b7077 + m.b7078 == 0) m.c5812 = Constraint(expr= - m.b7025 + m.b7079 + m.b7080 + m.b7081 == 0) m.c5813 = Constraint(expr= - m.b7026 + m.b7082 + m.b7083 + m.b7084 == 0) m.c5814 = Constraint(expr= - m.b7027 + m.b7085 + m.b7086 + m.b7087 == 0) m.c5815 = Constraint(expr= - m.b7028 + m.b7088 + m.b7089 + m.b7090 == 0) m.c5816 = Constraint(expr= - m.b7029 + m.b7091 + m.b7092 + m.b7093 == 0) m.c5817 = Constraint(expr= - m.b7030 + m.b7094 + m.b7095 + m.b7096 == 0) m.c5818 = Constraint(expr= - m.b7031 + m.b7097 + m.b7098 + m.b7099 == 0) m.c5819 = Constraint(expr= - m.b7032 + m.b7100 + 
m.b7101 + m.b7102 == 0) m.c5820 = Constraint(expr= - m.b7033 + m.b7103 + m.b7104 + m.b7105 == 0) m.c5821 = Constraint(expr= - m.b7034 + m.b7106 + m.b7107 + m.b7108 == 0) m.c5822 = Constraint(expr= - m.b7035 + m.b7109 + m.b7110 + m.b7111 == 0) m.c5823 = Constraint(expr= - m.b7036 + m.b7112 + m.b7113 + m.b7114 == 0) m.c5824 = Constraint(expr= - m.b7037 + m.b7115 + m.b7116 + m.b7117 == 0) m.c5825 = Constraint(expr= - m.b7038 + m.b7118 + m.b7119 + m.b7120 == 0) m.c5826 = Constraint(expr= - m.b7039 + m.b7121 + m.b7122 + m.b7123 == 0) m.c5827 = Constraint(expr= - m.b7040 + m.b7124 + m.b7125 + m.b7126 == 0) m.c5828 = Constraint(expr= - m.b7041 + m.b7127 + m.b7128 + m.b7129 == 0) m.c5829 = Constraint(expr= - m.b7042 + m.b7130 + m.b7131 + m.b7132 == 0) m.c5830 = Constraint(expr= - m.b7043 + m.b7133 + m.b7134 + m.b7135 == 0) m.c5831 = Constraint(expr= - m.b7044 + m.b7136 + m.b7137 + m.b7138 == 0) m.c5832 = Constraint(expr= - m.b7045 + m.b7139 + m.b7140 + m.b7141 == 0) m.c5833 = Constraint(expr= - m.b7046 + m.b7142 + m.b7143 + m.b7144 == 0) m.c5834 = Constraint(expr= - m.b7047 + m.b7145 + m.b7146 + m.b7147 == 0) m.c5835 = Constraint(expr= - m.b7048 + m.b7148 + m.b7149 + m.b7150 == 0) m.c5836 = Constraint(expr= - m.b7049 + m.b7151 + m.b7152 + m.b7153
None: kid = self._api_account_headers["Location"] try: _signed_payload = sign_payload( url=url, payload=payload, accountKeyData=self.accountKeyData, kid=kid, nonce=nonce, ) except IOError as exc: self._next_nonce = None raise try: result = url_request( url, post_data=_signed_payload.encode("utf8"), err_msg="_send_signed_request", depth=depth, ) try: _next_nonce = result[2]["Replay-Nonce"] if (not _next_nonce) or (nonce == _next_nonce): self._next_nonce = None else: self._next_nonce = _next_nonce except Exception as exc: self._next_nonce = None pass return result except IndexError: # retry bad nonces (they raise IndexError) self._next_nonce = None return self._send_signed_request( url, payload=payload, depth=(depth + 1), ) def _poll_until_not(self, _url, _pending_statuses, _log_message): """ Originally from acme-tiny :param _url: (required) The url :param _pending_statuses: (required) The statuses we will continue polling until we lose :param depth: (optional) An integer nothing the depth of this function being called """ log.info("acme_v2.AuthenticatedUser._poll_until_not {0}".format(_log_message)) _result, _t0 = None, time.time() while _result is None or _result["status"] in _pending_statuses: log.debug(") polling...") assert time.time() - _t0 < 3600, "Polling timeout" # 1 hour timeout time.sleep(0 if _result is None else 2) _result, _status_code, _headers = self._send_signed_request( _url, payload=None, ) return _result def update_contact(self, ctx, contact=None): """ :param ctx: (required) A :class:`lib.utils.ApiContext` instance :param contact: (optional) The updated contact info :param is_registration: (optional) Boolean """ log.info("acme_v2.AuthenticatedUser.update_contact( {0}".format(contact)) payload_contact = {"contact": contact} ( acme_account_object, _status_code, _acme_account_headers, ) = self._send_signed_request( self._api_account_headers["Location"], payload=payload_contact, ) self._api_account_object = acme_account_object log.debug(") 
update_contact | acme_account_object: %s" % acme_account_object) log.debug( ") update_contact | _acme_account_headers: %s" % _acme_account_headers ) log.info( ") update_contact | updated {0}".format( " ; ".join(acme_account_object["contact"]) ) ) def authenticate(self, ctx, contact=None, onlyReturnExisting=None): """ :param ctx: (required) A :class:`lib.utils.ApiContext` instance :param contact: (optional) The contact info :param onlyReturnExisting: bool. Default None. see ACME-spec (docs below) returns: False - no matching account or True - matching account https://tools.ietf.org/html/rfc8555#section-7.3 A client creates a new account with the server by sending a POST request to the server's newAccount URL. The body of the request is a stub account object containing some subset of the following fields: contact (optional, array of string): Same meaning as the corresponding server field defined in Section 7.1.2. termsOfServiceAgreed (optional, boolean): Same meaning as the corresponding server field defined in Section 7.1.2. onlyReturnExisting (optional, boolean): If this field is present with the value "true", then the server MUST NOT create a new account if one does not already exist. This allows a client to look up an account URL based on an account key (see Section 7.3.1). externalAccountBinding (optional, object): Same meaning as the corresponding server field defined in Section 7.1.2 ... The server creates an account and stores the public key used to verify the JWS (i.e., the "jwk" element of the JWS header) to authenticate future requests from the account. The server returns this account object in a 201 (Created) response, with the account URL in a Location header field. The account URL is used as the "kid" value in the JWS authenticating subsequent requests by this account (see Section 6.2). The account URL is also used for requests for management actions on this account, as described below. ... 
Example - Request POST /acme/new-account HTTP/1.1 Host: example.com Content-Type: application/jose+json { "protected": base64url({ "alg": "ES256", "jwk": {...}, "nonce": "6S8IqOGY7eL2lsGoTZYifg", "url": "https://example.com/acme/new-account" }), "payload": base64url({ "termsOfServiceAgreed": true, "contact": [ "mailto:<EMAIL>", "mailto:<EMAIL>" ] }), "signature": "RZPOnYoPs1PhjszF...-nh6X1qtOFPB519I" } Example - Response HTTP/1.1 201 Created Content-Type: application/json Replay-Nonce: D8s4D2mLs8Vn-goWuPQeKA Link: <https://example.com/acme/directory>;rel="index" Location: https://example.com/acme/acct/evOfKhNU60wg { "status": "valid", "contact": [ "mailto:<EMAIL>", "mailto:<EMAIL>" ], "orders": "https://example.com/acme/acct/evOfKhNU60wg/orders" } """ log.info("acme_v2.AuthenticatedUser.authenticate(") if self.acme_directory is None: raise ValueError("`acme_directory` is required") if "newAccount" not in self.acme_directory: raise ValueError("directory does not support `newAccount`") # log the event to the db self.acmeLogger.log_newAccount("v2", transaction_commit=True) # hit the acme api for the registration try: """possible api values for newAccount payload are: {"contact": None, "termsOfServiceAgreed": None, "onlyReturnExisting": None, "externalAccountBinding": None, } """ payload_registration = { "termsOfServiceAgreed": True, } if contact is not None: # contact should be a LIST of URI if "@" in contact and (not contact.startswith("mailto:")): contact = "mailto:%s" % contact payload_registration["contact"] = [ contact, ] # spec wants a list if onlyReturnExisting is not None: payload_registration["onlyReturnExisting"] = onlyReturnExisting try: ( acme_account_object, status_code, acme_account_headers, ) = self._send_signed_request( self.acme_directory["newAccount"], payload=payload_registration, ) except errors.AcmeServerError as exc: # only catch this if `onlyReturnExisting` and there is an DNE error if onlyReturnExisting: if exc.args[0] == 400: if ( 
exc.args[1]["type"] == "urn:ietf:params:acme:error:accountDoesNotExist" ): log.debug( ") authenticate | check failed. key is unknown to server" ) event_payload_dict = utils.new_event_payload_dict() event_payload_dict["acme_account.id"] = self.acmeAccount.id event_payload_dict["acme_account.check"] = False dbOperationsEvent = self.log__OperationsEvent( ctx, model_utils.OperationsEventType.from_string( "AcmeAccount__check" ), event_payload_dict, ) raise exc self._api_account_object = acme_account_object self._api_account_headers = acme_account_headers log.debug(") authenticate | acme_account_object: %s" % acme_account_object) log.debug( ") authenticate | acme_account_headers: %s" % acme_account_headers ) log.info( ") authenticate = %s" % ( "acme_v2 Registered!" if status_code == 201 else "Already registered!" ) ) # this would raise if we couldn't authenticate self.acmeAccount.timestamp_last_authenticated = ctx.timestamp ctx.dbSession.flush(objects=[self.acmeAccount]) # log this event_payload_dict = utils.new_event_payload_dict() event_payload_dict["acme_account.id"] = self.acmeAccount.id dbOperationsEvent = self.log__OperationsEvent( ctx, model_utils.OperationsEventType.from_string( "AcmeAccount__authenticate" ), event_payload_dict, ) return True except Exception as exc: raise def deactivate(self, ctx, transaction_commit=None): """ :param ctx: (required) A :class:`lib.utils.ApiContext` instance Deactivates the authenticated user against the Acme directory https://tools.ietf.org/html/rfc8555#section-7.3.6 https://tools.ietf.org/html/draft-ietf-acme-acme-13#section-7.3.7 A client can deactivate an account by posting a signed update to the account URL with a status field of "deactivated". 
POST /acme/acct/evOfKhNU60wg HTTP/1.1 Host: example.com Content-Type: application/jose+json { "protected": base64url({ "alg": "ES256", "kid": "https://example.com/acme/acct/evOfKhNU60wg", "nonce": "ntuJWWSic4WVNSqeUmshgg", "url": "https://example.com/acme/acct/evOfKhNU60wg" }), "payload": base64url({ "status": "deactivated" }), "signature": "earzVLd3m5M4xJzR...bVTqn7R08AKOVf3Y" } https://tools.ietf.org/html/draft-ietf-acme-acme-13#section-7.3.7 """ log.info("acme_v2.AuthenticatedUser.deactivate(") if transaction_commit is not True: # required for the `AcmeLogger` raise ValueError("we must invoke this knowing it will commit") if self.acme_directory is None: raise ValueError("`acme_directory` is required") _account_url = self._api_account_headers["Location"] if not _account_url: raise ValueError("Account URL unknown") is_did_deactivate = None try: _payload_deactivate = {"status": "deactivated"} ( acme_account_object, status_code, acme_account_headers, ) = self._send_signed_request( _account_url, payload=_payload_deactivate, ) # this is a flag is_did_deactivate = True log.debug(") deactivate | acme_account_object: %s" % acme_account_object) log.debug(") deactivate | acme_account_headers: %s" % acme_account_headers) log.info( ") deactivate = %s" % ("acme_v2 DEACTIVATED!" 
if status_code == 200 else "ERROR") ) # this would raise if we couldn't authenticate db_update.update_AcmeAccount__set_deactivated(ctx, self.acmeAccount) ctx.dbSession.flush(objects=[self.acmeAccount]) # log this event_payload_dict = utils.new_event_payload_dict() event_payload_dict["acme_account.id"] = self.acmeAccount.id dbOperationsEvent = self.log__OperationsEvent( ctx, model_utils.OperationsEventType.from_string("AcmeAccount__deactivate"), event_payload_dict, ) finally: return is_did_deactivate def key_change(self, ctx, dbAcmeAccountKey_new, transaction_commit=None): """ :param ctx: (required) A :class:`lib.utils.ApiContext` instance :param dbAcmeAccountKey_new: (required) a :class:`model.objects.AcmeAccountKey` instance Performs a key change rollover https://tools.ietf.org/html/rfc8555#section-7.3.5 https://tools.ietf.org/html/draft-ietf-acme-acme-13#section-7.3.6 POST /acme/key-change HTTP/1.1 Host: example.com Content-Type: application/jose+json { "protected": base64url({ "alg": "ES256", "kid": "https://example.com/acme/acct/1", "nonce": "K60BWPrMQG9SDxBDS_xtSw", "url": "https://example.com/acme/key-change" }), "payload": base64url({ "protected": base64url({ "alg": "ES256", "jwk": /* new key */, "url": "https://example.com/acme/key-change" }), "payload": base64url({ "account": "https://example.com/acme/acct/1", "oldKey": /* old key */ }), "signature": "Xe8B94RD30Azj2ea...8BmZIRtcSKPSd8gU" }), "signature": "5TWiqIYQfIDfALQv...x9C2mg8JGPxl5bI4" } """ log.info("acme_v2.AuthenticatedUser.key_change(") if transaction_commit is not True: # required for the `AcmeLogger` raise ValueError("we must invoke this knowing it will commit") if self.acme_directory is None: raise ValueError("`acme_directory` is required") if "keyChange" not in self.acme_directory: raise ValueError("directory does not support `keyChange`") _account_url = self._api_account_headers["Location"] if not _account_url: raise ValueError("Account URL unknown") is_did_keychange = None try: # quickref 
and toggle these, so we generate the correct payloads accountKeyData_old = self.accountKeyData accountKeyData_new = cert_utils.AccountKeyData( key_pem=dbAcmeAccountKey_new.key_pem, ) _key_change_url = self.acme_directory["keyChange"] _payload_inner = { "account": _account_url, "oldKey": accountKeyData_old.jwk, } payload_inner = sign_payload_inner( url=_key_change_url, payload=_payload_inner, accountKeyData=accountKeyData_new, ) (acme_response, status_code, acme_headers,) = self._send_signed_request( _key_change_url, payload=payload_inner, ) is_did_keychange = True log.debug(") key_change | acme_response: %s" % acme_response) log.debug(") key_change | acme_headers: %s" % acme_headers) # assuming things worked... self.accountKeyData = accountKeyData_new # turn off the old and flush, so the index is maintained dbAcmeAccountKey_old = self.acmeAccount.acme_account_key dbAcmeAccountKey_old.is_active = None dbAcmeAccountKey_old.timestamp_deactivated = ctx.timestamp ctx.dbSession.flush(objects=[dbAcmeAccountKey_old]) # turn on the new and flush self.acmeAccount.acme_account_key = dbAcmeAccountKey_new dbAcmeAccountKey_new.is_active = True ctx.dbSession.flush( objects=[ dbAcmeAccountKey_new, self.acmeAccount, ] ) # log this event_payload_dict = utils.new_event_payload_dict() event_payload_dict["acme_account.id"] = self.acmeAccount.id event_payload_dict["acme_account_key-old.id"] = dbAcmeAccountKey_old.id event_payload_dict["acme_account_key-new.id"] = dbAcmeAccountKey_new.id dbOperationsEvent = self.log__OperationsEvent( ctx, model_utils.OperationsEventType.from_string("AcmeAccount__key_change"), event_payload_dict, ) finally: return is_did_keychange def acme_order_load(self, ctx, dbAcmeOrder, transaction_commit=None): """ :param ctx: (required) A :class:`lib.utils.ApiContext` instance :param dbAcmeOrder: (required) a :class:`model.objects.AcmeOrder` instance """ log.info("acme_v2.AuthenticatedUser.acme_order_load(") if transaction_commit
<gh_stars>0 ''' Created on May 29, 2017 @author: husensofteng ''' import matplotlib from numpy.lib.function_base import average import math matplotlib.use('TkAgg') from matplotlib.pyplot import tight_layout import seaborn as sns import matplotlib.pyplot as plt plt.style.use('seaborn-ticks') import matplotlib.patches as patches from matplotlib import transforms import matplotlib.patheffects from matplotlib.font_manager import FontProperties import matplotlib as mpl import numpy as np import matplotlib.gridspec as gridspec from matplotlib import rcParams, ticker from operator import itemgetter import sys from utils import * def draw_steam_lines(ax): #no motif no signal draw_text(ax, x=4, y=4.75, text="Identification of CFRMs", fontsize=12) y_text_checks = 4 stem_line(ax, x=[1], y=[2], marker='.', markerfacecolor='grey', markeredgecolor='grey', stemline_color='grey') draw_marker(ax, x=1,y=y_text_checks, marker='$\\times$', color='black') draw_text(ax, x=1,y=-1.75, text="No peak\nNo motif") #ax.plot([i for i in range(0,3)], [0 for i in range(0,3)], color='grey') #effective motif but no signal stem_line(ax, x=[4.5], y=[2], marker='.', markerfacecolor='grey', markeredgecolor='red', stemline_color='grey') ax.add_patch(patches.FancyBboxPatch((4, 0.1), 1, 0.1, edgecolor = 'orange', boxstyle='round', fill=False, linewidth=2.0))#facecolor="green" draw_marker(ax, x=4.5,y=y_text_checks, marker='$\\times$', color='black') draw_text(ax, x=4.5,y=-1.25, text="No peak") #ax.plot([i for i in np.arange(3.5,6.5)], [0 for i in np.arange(3.5,6.5)], color='grey') #signal but no motif stem_line(ax, x=[10], y=[2], marker='.', markerfacecolor='grey', markeredgecolor='grey', stemline_color='grey') plot_sin(ax, shift_x=8) draw_marker(ax, x=10,y=y_text_checks, marker='$\\times$', color='black') draw_text(ax, x=10,y=-1.25, text="No motif") #ax.plot([i for i in range(8,13)], [0 for i in range(8,13)], color='grey') #signal and motif but not effective stem_line(ax, x=[15], y=[2], marker='.', 
markerfacecolor='grey', markeredgecolor='grey', stemline_color='grey') ax.add_patch(patches.FancyBboxPatch((14.5, 0.1), 1, 0.1, edgecolor = 'orange', boxstyle='round', fill=False, linewidth=2.0))#facecolor="green" plot_sin(ax, shift_x=13) draw_marker(ax, x=15,y=y_text_checks, marker='$\\times$', color='black') draw_text(ax, x=15,y=-1.75, text="No sig. effect\non motif") #ax.plot([i for i in range(13,18)], [0 for i in range(13,18)], color='grey') #signal and motif but not significant stem_line(ax, x=[20], y=[2], marker='.', markerfacecolor='grey', markeredgecolor='red', stemline_color='grey') ax.add_patch(patches.FancyBboxPatch((19.5, 0.1), 1, 0.1, edgecolor = 'orange', boxstyle='round', fill=False, linewidth=2.0))#facecolor="green" plot_sin(ax, shift_x=18) draw_marker(ax, x=20,y=y_text_checks, marker='$\\times$', color='black') draw_text(ax, x=20,y=-1.75, text="No sig. MFS") #ax.plot([i for i in range(18,23)], [0 for i in range(18,23)], color='grey') #effective motif, signal and significant stem_line(ax, x=[25.5], y=[2], marker='o', markerfacecolor='green', markeredgecolor='red', stemline_color='green') ax.add_patch(patches.FancyBboxPatch((25, 0.1), 1, 0.1, edgecolor = 'orange', boxstyle='round', fill=False, linewidth=2.0))#facecolor="green" plot_sin(ax, shift_x=23.5) draw_marker(ax, x=25.5,y=y_text_checks, marker='$\\checkmark$') draw_marker(ax, x=26.4,y=3, marker='*', color='black', markersize=7) draw_text(ax, x=26.4,y=-2.25, text="Sig. MFS\nSig. 
effect on motif \nDNase1 peak ") draw_marker(ax, x=23.15,y=-1, marker='*', color='black', markersize=7) draw_marker(ax, x=23.15,y=-1.55, marker='o', color='green', markersize=5, markeredgecolor='red', markeredgewidth=1) plot_sin(ax, shift_x=22.9, y_shift=-2.45, R=0.5, A=-0.5, B=-0.5, color='blue') #ax.plot([i for i in np.arange(23.5,28.5)], [0 for i in np.arange(23.5,28.5)], color='grey') #effective motif and same tf peak stem_line(ax, x=[32.5], y=[2], marker='.', markerfacecolor='grey', markeredgecolor='red', stemline_color='grey') ax.add_patch(patches.FancyBboxPatch((32, 0.1), 1, 0.1, edgecolor = 'orange', boxstyle='round', fill=False, linewidth=2.0))#facecolor="green" plot_sin(ax, shift_x=30.5, color='grey', linestyle='-') draw_marker(ax, x=32.5, y=y_text_checks, marker='$\\times$', color='black') draw_text(ax, x=32.5,y=-1.75, text="No motif-matcing\nTF-peak") #ax.plot([i for i in np.arange(30.5,35.5)], [0 for i in np.arange(30.5,35.5)], color='grey') #effective motif and same tf peak stem_line(ax, x=[38], y=[2], marker='o', markerfacecolor='green', markeredgecolor='red', stemline_color='green') ax.add_patch(patches.FancyBboxPatch((37.5, 0.1), 1, 0.1, edgecolor = 'orange', boxstyle='round', fill=False, linewidth=2.0))#facecolor="green" plot_sin(ax, shift_x=36, color='orange', linestyle='-') #plot_sin(ax3, shift_x=36, R=1, A=3,B=3,color='orange', linestyle='-') draw_marker(ax, x=38,y=y_text_checks, marker='$\\checkmark$') draw_text(ax, x=38.65,y=-2.25, text="Sig. 
effect on motif\nMotif-matching \nTF-peak ") draw_marker(ax, x=35.65,y=-1, marker='o', color='green', markersize=5, markeredgecolor='red', markeredgewidth=1) plot_sin(ax, shift_x=35.4, y_shift=-1.95, R=0.5, A=-0.5, B=-0.5, linestyle='-', color='orange') ##ax.plot([i for i in range(36,41)], [0 for i in range(36,41)], color='grey') ax.plot([i for i in np.arange(0,42)], [0 for i in np.arange(0,42)], color='grey', linewidth=1.0) ax.plot([i for i in np.arange(-1,43)], [-2.75 for i in np.arange(-1,43)], color='grey', linewidth=1.0) ax.plot([i for i in np.arange(9.5,43)], [5 for i in np.arange(9.5,43)], color='grey', linewidth=1.0) ax.plot([-1 for i in np.arange(-2.75,6)], [i for i in np.arange(-2.75,6)], color='grey', linewidth=1.0) ax.plot([42 for i in np.arange(-2.75,6)], [i for i in np.arange(-2.75,6)], color='grey', linewidth=1.0) ax.set_xlim(-1,42) ax.set_ylim(-4,5) ax.get_yaxis().set_visible(False) ax.get_xaxis().set_visible(False) ax.set_frame_on(False) #ax.xaxis.set_ticks([]) #mutate rates #part B def rate_per_chromatin_state(muts_input_file, ax, names='chr,start,Annotation'.split(','), usecols = [0,1,9], d = 300, chr_in=0, window_size=1000000): df = pd.read_table(muts_input_file, sep='\t', skiprows=0, header=None, usecols=usecols, names=names, nrows=1000) #plt.figure(figsize=(12, 12)) df['chr'] = df['chr'].apply(getchrnum) if chr_in>0: df = df[df['chr']==chr_in] df['x'] = df['chr'] + df['start'].apply(float).apply(get_xaxis, args=(window_size, d,)) df['State'] = df['Annotation'].apply(get_state) df['NumberMutsPerWindowPerState'] = df.groupby(by=['State','x'])['start'].transform('count') avg_per_state_per_mb = {} for i, r in df.iterrows(): try: avg_per_state_per_mb[r['State']][r['x']] = r['NumberMutsPerWindowPerState'] except KeyError: avg_per_state_per_mb[r['State']] = {r['x']: r['NumberMutsPerWindowPerState']} #plt.show() states = [] counts = [] for k in sorted(avg_per_state_per_mb.keys()): for i in range(len(avg_per_state_per_mb[k]), len(df['x'].unique())): 
avg_per_state_per_mb[k][str(i)+"nomut"]=0 for w in sorted(avg_per_state_per_mb[k]): states.append(k) #if avg_per_state_per_mb[k][w]>0.0: # counts.append(math.log10(avg_per_state_per_mb[k][w]/2515.0)) #else: counts.append(avg_per_state_per_mb[k][w]/2515.0) #sns.set_style("white", {'axes.linewidth': 0.5}) #sns.set_context("talk") #plt.figure(figsize=(4, 2)) states_order = 'Tx,Tss,Enh,Repr,NA,Quies'.split(',') #hue_colors = {'TssA': , 'TssAFlnk': ,'TssBiv': , 'Tx': ,'TxFlnk': , 'TxWk': ,'BivFlnk': , 'Enh': ,'EnhBiv': ,'EnhG': , 'Het': ,'ZNF/Rpts': ,'ReprPCWk': ,'ReprPC': ,'Quies': ,'NoState':} plt_results = sns.barplot(states, counts, estimator=average, ci=95, ax=ax, order=states_order, hue_order=states_order, palette=['red', 'green', 'orange', 'brown', 'grey','lightgray'], errwidth=0.5, capsize=0.15, orient='v')#'BuGn_r') plt_results.set_xticklabels(plt_results.get_xticklabels(), rotation=0) #plt.setp(ax.spines.values(), linewidth=1) ax.spines['left'].set_linewidth(1)#set line width fo the y axis ax.spines['left'].set_color('grey') #plt_results.set_yticklabels(range(0,10)) #draw_text(plt_results, x = 0, y=3, text='c') ax.set_ylabel("Average mutation\nrate per megabse") tick_spacing=1 ax.yaxis.set_major_locator(ticker.MultipleLocator(tick_spacing)) #ax.plot([-0.5 for i in np.arange(0,5)], [i for i in range(0,5)], linewidth=1.0, color='grey') #draw_text(ax, x=0, y=3, text='c', color='black', fontsize=10) def plot_muts(muts_input_file, names='chr,start,DNase1,TFBinding'.split(','), usecols = [0,1,10,11], motifs=True, d = 6000.0, chr_in=0, window_size=50000, ymax_value = 1.0): df = pd.read_table(muts_input_file, sep='\t', skiprows=0, header=None, usecols=usecols, names=names, nrows=1000) #plt.figure(figsize=(12, 12)) df['chr'] = df['chr'].apply(getchrnum) if chr_in>0: df = df[df['chr']==chr_in] df['x'] = df['chr'] + df['start'].apply(float).apply(get_xaxis, args=(window_size, d,)) if motifs: df['Active'] = np.where((df['DNase1']>1e-300) | (df['TFBinding']>1e-300), True, 
False)#col='ChromatinState', col_wrap=5, else: df['Active'] = df['Annotation'].str.contains('DNase1|TFBinding', na=False) #df['Active'] = np.where(('TFBinding' in df['Annotation']) | ('DNase1' in df['Annotation']), 'Yes', 'No') #df['x'] = df.start.apply(get_xaxis, args=(int(df['chr'].replace('X','23').replace('Y','24').replace('M','25').replace('chr','')),)) #df['y'] = df.start.apply(get_yaxis) #df['ChromatinState'] = df['ChromatinState'].apply(get_unique_state) active_windows = [] active_window_counts = [] unactive_windows = [] unactive_window_counts = [] chromosomes = [] max_window_numbers_per_chr = [] hyper_mutated_windows = {} for active_label, df_activity in df.groupby('Active'): for chr_label, df_chr in df_activity.groupby('chr'): chromosomes.append(chr_label) max_window_numbers_per_chr.append(df_chr['x'].max()) windows = [] window_counts = [] for window_label, df_window in df_chr.groupby('x'): num_muts_in_window = len(df_window)/2515.0 if not motifs: if num_muts_in_window>ymax_value:#2515 hyper_mutated_windows[window_label]=num_muts_in_window num_muts_in_window = ymax_value#2500 if num_muts_in_window==0: windows.append('nan') window_counts.append('nan') windows.append(window_label) window_counts.append(num_muts_in_window) #window_counts.append(math.log(len(df_window), 2)) if num_muts_in_window==0: windows.append('nan') window_counts.append('nan') if active_label: active_windows.extend(windows) active_window_counts.extend(window_counts) active_windows.append('nan') active_window_counts.append('nan') else: unactive_windows.extend(windows) unactive_window_counts.extend(window_counts) unactive_windows.append('nan') unactive_window_counts.append('nan') return active_windows, active_window_counts, unactive_windows, unactive_window_counts, chromosomes, max_window_numbers_per_chr, hyper_mutated_windows def plot_lines(windows, chromosomes, ax, max_window_numbers_per_chr, hyper_mutated_windows, ymax_value = 1.0): ax.plot(windows[0], windows[1], windows[2], windows[3], 
windows[4], windows[5], windows[6], windows[7], windows[8], windows[9], windows[10], windows[11],) ax.spines['left'].set_color('grey') ax.spines['left'].set_linewidth(1) #ax.spines.values(), linewidth=1, color='grey') #ax.set_xticklabels(labels=chromosomes) chromosomes = np.array(list(set(chromosomes))) chromosomes_pos = ((np.array(max_window_numbers_per_chr[0:len(chromosomes)])-chromosomes)/2.0)+chromosomes chromosomes = list(chromosomes) ax.set_xlabel("chromosomes (hg19)") ax.set_ylabel("Average mutation rate per 50Kb") #plot hyper mutated elements names_for_hyper_elements = {2:'IGK', 14:'IGH', 22:'IGL'} print hyper_mutated_windows for hyper_element in hyper_mutated_windows.keys(): draw_text(ax, x=hyper_element, y=1+0.05, text='.', color='black', fontsize=8, horizontalalignment='left', rotation=0) draw_text(ax, x=hyper_element, y=1+0.10, text='.', color='black', fontsize=8, horizontalalignment='left', rotation=0) draw_text(ax, x=hyper_element, y=1+0.15, text='.', color='black', fontsize=8, horizontalalignment='left', rotation=0) draw_text(ax, x=hyper_element, y=ymax_value+0.3, text=names_for_hyper_elements[int(hyper_element)] + ' (%0.2f)' % hyper_mutated_windows[hyper_element], color='black', fontsize=8, horizontalalignment='left', rotation=45) for i,chr in enumerate(chromosomes): if chr==23: chromosomes[i] = 'X' elif chr==24: chromosomes[i] = 'Y' ax.set_xticks(list(chromosomes_pos))#,list(chromosomes)) ax.set_xticklabels(chromosomes) ax.minorticks_off() ax.set_ylim(0.0,ymax_value) #ax.tick_params(axis='x', direction='out', length=1, width=1, colors='black', ) #tick_spacing=1 #ax.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing)) def draw_piechart_muts(ax, radius=1.0, center=(0,0)): labels = ['No overlap', 'TFBS/DHS', 'TFBS/DHS - Matching cell line'] sizes = [25277342-21698761, 21698761-937338, 937338] colors = ['lightgrey', '#C0C0C0', 'grey'] explode = (0.0, 0.0, 0.1) pie_wedge_collection = ax.pie(sizes, explode = explode, labels=labels, 
colors=colors, startangle=90, labeldistance=1.05, autopct='%1.1f%%', radius=radius, center=center) for pie_wedge in pie_wedge_collection[0]: pie_wedge.set_edgecolor('white') ax.axis('equal') return def draw_piechart_motifs(ax, radius=1.0, center=(0,0)): labels = ['No matching TFBS/DHS', 'DHS - Matching cell line', 'Matching TFBS - Matching cell line'] sizes = [3935511-222800-72317, 222800, 72317] colors = ['lightgrey', '#C0C0C0', 'grey'] explode = (0.0, 0.1, 0.1) pie_wedge_collection = ax.pie(sizes, explode = explode, labels=labels, colors=colors, startangle=90, labeldistance=1.05, autopct='%1.1f%%', radius=radius, center=center) for pie_wedge in pie_wedge_collection[0]: pie_wedge.set_edgecolor('white') ax.axis('equal') return def draw_plot(): sns.set_style('white', {'text.color': '.15'}) #matplotlib.rc('axes',edgecolor='grey') #mpl.rcParams['font.family'] = fontfamily #rcParams['font.sans-serif'] = ['Verdana'] #rcParams['svg.fonttype'] = 'none' fig = plt.figure(figsize=(12,8))#design a figure with the given size gs = gridspec.GridSpec(3, 3, height_ratios=[3 , 6, 3.5], wspace=0.0, hspace=0.0)#create 2 rows and three columns with the given ratio for each ax0 = fig.add_subplot(gs[1, :]) ax1 = fig.add_subplot(gs[2, :])#take the entire first row for the first sub plot ax2 = fig.add_subplot(gs[0, -1:]) #take the last col of the second row for sub fig3 #ax2 = fig.add_subplot(gs[0, :-1]) #the col1 and 2 of the second row for sub fig2 ax3 = fig.add_subplot(gs[0, 0]) #the col1 and 2 of the second row for sub fig2 ax4 = fig.add_subplot(gs[0, 1]) #ax4 = fig.add_subplot(gs[0, 1]) #the col1 and 2 of the second row for sub fig2 #ax7 = fig.add_subplot(gs[1, 0]) #ax8 = fig.add_subplot(gs[1, 1]) draw_piechart_muts(ax3) draw_piechart_motifs(ax4) #ax2.get_xaxis().set_visible(False) #ax2.get_yaxis().set_visible(False) #f, (ax1, ax2) = plt.subplots(2, figsize=(12,6)) #Plot all muts for all chromosomes muts_input_file = '../analysis/data/observed_agreement_22May2017_annotated.bed10' 
motif_muts_input_file = '../analysis/data/motifmuts_all.bed12' active_windows, active_window_counts, unactive_windows, unactive_window_counts, chromosomes, max_window_numbers_per_chr, hyper_mutated_windows = plot_muts(muts_input_file, names='chr,start,Annotation'.split(','), usecols = [0,1,9], motifs=False, chr_in=0,
struct.pack(str(record) + "f", *dg['beamAngleReRx_deg']) buffer += struct.pack(str(record) + "f", *dg['beamAngleCorrection_deg']) buffer += struct.pack(str(record) + "f", *dg['twoWayTravelTime_sec']) buffer += struct.pack(str(record) + "f", *dg['twoWayTravelTimeCorrection_sec']) buffer += struct.pack(str(record) + "f", *dg['deltaLatitude_deg']) buffer += struct.pack(str(record) + "f", *dg['deltaLongitude_deg']) buffer += struct.pack(str(record) + "f", *dg['z_reRefPoint_m']) buffer += struct.pack(str(record) + "f", *dg['y_reRefPoint_m']) buffer += struct.pack(str(record) + "f", *dg['x_reRefPoint_m']) buffer += struct.pack(str(record) + "f", *dg['beamIncAngleAdj_deg']) buffer += struct.pack(str(record) + "H", *dg['realTimeCleanInfo']) buffer += struct.pack(str(record) + "H", *dg['SIstartRange_samples']) buffer += struct.pack(str(record) + "H", *dg['SIcentreSample']) buffer += struct.pack(str(record) + "H", *dg['SInumSamples']) return bz2.compress(buffer) def encodeArrayIntoUintX(self, A, res): ''' Differential encoding of an array of values into a byte array A: An array of values res: Desired resolution. This determines whether the encoding is in an 8-bit or 16-bit array. Details provided below. returns: bytes buffer containing packed values and metadata to unpack it. The data is differentially encoded, meaning that the difference in sequential values is calculated, then the minimum differential value is subtracted off the array before scaling each value by max_bits / (max-min). max_bits is 255 for uint8 encoding and 65535 for uint16 encoding. To determine the encoding, (max-min) / max_bits is compared to the desired resolution to ensure the minimum increment falls below it. uint8 is checked first, if it fails, uint16 is checked. If it also fails, uint32 is used and no actual compression is achieved. A buffer is created from the result containing everything needed to decipher it. 
Specifically: The first value of the original array as a 4-byte float Min difference values as 4-byte float. Max difference value as a 4-byte float. The number of bits used in the encoding (8 or 16) as a uint8. The number of difference values (len(A)-1) as an 4-byte unsigned int The array of scaled difference values cast to unsigned "max_bits" integers ''' if isinstance(A, list): A = np.array(A) # There are two strategies taken here. Sometimes the # data varies smoothly but over a large range, and it # is more efficient to encode the data's sequential # differences, since they are small in amplitude. # But sometimes the data is very stochastic and the # first range of differences are large relative to # the maximum and minimum values in the data. For # example consider the sequence [0 2 0]. The range # of the values is 2, but the range of the first # differences is 4 (+2 - -2). In this case, it is # more efficient to encode the values themselves. valuesToEncode = np.diff(A.flatten()) maxv = np.max(valuesToEncode) minv = np.min(valuesToEncode) maxA = np.max(A) minA = np.min(A) # print("maxvaluesToEncode:%f, minvaluesToEncode:%f" % (maxv,minv)) # print("maxA:%f, minA:%f" % (maxA,minA)) differentialEncode = True if (maxA - minA) < (maxv - minv): differentialEncode = False maxv = maxA minv = minA valuesToEncode = A[1:] # print("Encoding: %s" % differentialEncode) if ((maxv - minv) / 255.0) < res: bits = 8 elif ((maxv - minv) / 65535.0) < res: bits = 16 else: bits = 32 # print("CANNOT Maintain Resolution - Loss of Data!") # print("max diff: %f, min diff: %f, res: %f" % (maxv, minv, res)) # bits = 16 # return None # print(bits) if maxv == minv: # Value is constant. scaleFactor = 1.0 else: if bits == 8: scaleFactor = 255.0 / (maxv - minv) elif bits == 16: scaleFactor = 65535.0 / (maxv - minv) else: scaleFactor = 4294967295.0 / (maxv - minv) tmp = (((valuesToEncode - minv) * scaleFactor)).astype(int) # This bullshit gets around an apparant bug in the struct module. 
def decodeUintXintoArray(self, buffer):
    '''Decode a buffer produced by encodeArrayIntoUintX() back into floats.

    buffer: bytes object starting with the 17-byte header
            (first value, min, max as 4-byte floats; signed count;
            bit width as uint8) followed by the packed integers.

    Returns:
        (orig, bytesDecoded) where `orig` is the reconstructed list of
        values and `bytesDecoded` is the number of bytes consumed, so a
        caller can continue parsing a concatenated stream.

    Raises:
        ValueError: if the header advertises an unsupported bit width
            (previously this fell through to a NameError).
    '''
    first, lo, hi, count, bits = struct.unpack('fffiB', buffer[0:17])

    # A negative count marks differentially encoded data; see the encoder.
    differential = count < 0
    N = -count if differential else count

    if bits == 8:
        fmt, width, full_scale = 'B', 1, 255.0
    elif bits == 16:
        fmt, width, full_scale = 'H', 2, 65535.0
    elif bits == 32:
        fmt, width, full_scale = 'I', 4, 4294967295.0
    else:
        # Corrupt or unsupported header - fail loudly instead of with a
        # confusing NameError further down.
        raise ValueError('Unsupported bit width: %s' % bits)

    bytesDecoded = 17 + N * width
    raw = struct.unpack(str(N) + fmt, buffer[17:bytesDecoded])

    # Undo the quantization: map integers back into [lo, hi].
    scaled = (np.array([float(x) for x in raw]) * (hi - lo) / full_scale) + lo

    if differential:
        # Encoded values were first differences; integrate them back.
        orig = np.cumsum([first] + list(scaled)).tolist()
    else:
        orig = [first] + list(scaled)

    return (orig, bytesDecoded)
For example, # if there were just one tx sector, and only normal type # detections of using amplitude method, the values would # all be 1, which is a valid tx sector value. So I'll leave # these commented out. # else: # buffer += struct.pack(str(record)+"B", *dg['txSectorNumb']) # buffer += struct.pack(str(record)+"B", *dg['detectionType']) # buffer += struct.pack(str(record)+"B", *dg['detectionMethod']) buffer += struct.pack(str(record) + "B", *dg['rejectionInfo1']) buffer += struct.pack(str(record) + "B", *dg['rejectionInfo2'])
'tr': lambda v: dh_centuryAD(v, u'%d. yüzyıl'), 'tt': lambda v: dh_centuryAD(v, u'%d. yöz'), 'uk': lambda v: dh_centuryAD(v, u'%d століття'), 'ur': lambda v: dh_centuryAD(v, u'%2d00صبم'), 'vi': lambda v: dh_centuryAD(v, u'Thế kỷ %d'), 'wa': lambda v: dh_centuryAD(v, u'%dinme sieke'), 'zh': lambda v: dh_centuryAD(v, u'%d世纪'), 'zh-min-nan': lambda v: dh_centuryAD(v, u'%d sè-kí'), }, 'CenturyBC': { 'af': lambda m: multi(m, [ (lambda v: dh_centuryBC(v, u'%dste eeu v.C.'), lambda p: p in (1, 8) or (p >= 20)), (lambda v: dh_centuryBC(v, u'%dde eeu v.C.'), alwaysTrue)]), 'bg': lambda v: dh_centuryBC(v, u'%d век пр.н.е.'), 'br': lambda m: multi(m, [ (lambda v: dh_constVal(v, 1, u'Iañ kantved kt JK'), lambda p: p == 1), (lambda v: dh_constVal(v, 2, u'Eil kantved kt JK'), lambda p: p == 2), (lambda v: dh_centuryBC(v, u'%Re kantved kt JK'), lambda p: p in (2, 3)), (lambda v: dh_centuryBC(v, u'%Rvet kantved kt JK'), alwaysTrue)]), 'ca': lambda v: dh_centuryBC(v, u'Segle %R aC'), 'cs': lambda v: dh_centuryBC(v, u'%d. století př. n. l.'), 'da': lambda v: dh_centuryBC(v, u'%d. århundrede f.Kr.'), 'de': lambda v: dh_centuryBC(v, u'%d. Jahrhundert v. Chr.'), 'el': lambda v: dh_centuryBC(v, u'%dος αιώνας π.Χ.'), 'en': lambda m: multi(m, [ (lambda v: dh_centuryBC(v, u'%dst century BC'), lambda p: p == 1 or (p > 20 and p % 10 == 1)), (lambda v: dh_centuryBC(v, u'%dnd century BC'), lambda p: p == 2 or (p > 20 and p % 10 == 2)), (lambda v: dh_centuryBC(v, u'%drd century BC'), lambda p: p == 3 or (p > 20 and p % 10 == 3)), (lambda v: dh_centuryBC(v, u'%dth century BC'), alwaysTrue)]), 'eo': lambda v: dh_centuryBC(v, u'%d-a jarcento a.K.'), 'es': lambda v: dh_centuryBC(v, u'Siglo %R adC'), 'et': lambda v: dh_centuryBC(v, u'%d. 
aastatuhat eKr'), 'fi': lambda m: multi(m, [ (lambda v: dh_constVal(v, 1, u'Ensimmäinen vuosisata eaa.'), lambda p: p == 1), (lambda v: dh(v, u'%d00-luku eaa.', lambda i: i - 1, lambda ii: ii[0] + 1), alwaysTrue)]), 'fr': lambda m: multi(m, [ (lambda v: dh_centuryBC(v, u'%Rer siècle av. J.-C.'), lambda p: p == 1), (lambda v: dh_centuryBC(v, u'%Re siècle av. J.-C.'), alwaysTrue)]), 'he': lambda v: dh_centuryBC(v, u'המאה ה־%d לפני הספירה'), 'hr': lambda v: dh_centuryBC(v, u'%d. stoljeće p.n.e.'), 'id': lambda v: dh_centuryBC(v, u'Abad ke-%d SM'), 'io': lambda v: dh_centuryBC(v, u'%dma yar-cento aK'), 'it': lambda v: dh_centuryBC(v, u'%R secolo AC'), 'ja': lambda v: dh_centuryBC(v, u'紀元前%d世紀'), 'ka': lambda v: dh_centuryBC(v, u'ძვ. წ. %R საუკუნე'), 'ko': lambda v: dh_centuryBC(v, u'기원전 %d세기'), 'ksh': lambda v: dh_centuryBC(v, u'%d. Joohunndot füür Kreůßtůß'), # uncertain if ksh is right. might go to redirect. 'la': lambda v: dh_centuryBC(v, u'Saeculum %d a.C.n.'), 'lb': lambda v: dh_centuryBC(v, u'%d. Joerhonnert v. Chr.'), 'nl': lambda v: dh_centuryBC(v, u'%de eeuw v.Chr.'), 'nn': lambda m: multi(m, [ (lambda v: dh_constVal(v, 1, u'1. århundret fvt.'), lambda p: p == 1), (lambda v: dh(v, u'%d00-talet fvt.', lambda i: i - 1, lambda ii: ii[0] + 1), alwaysTrue)]), 'no': lambda v: dh_centuryBC(v, u'%d. århundre f.Kr.'), 'pl': lambda v: dh_centuryBC(v, u'%R wiek p.n.e.'), 'pt': lambda v: dh_centuryBC(v, u'Século %R a.C.'), 'ro': lambda m: multi(m, [ (lambda v: dh_constVal(v, 1, u'Secolul I î.Hr.'), lambda p: p == 1), (lambda v: dh_centuryBC(v, u'Secolul al %R-lea î.Hr.'), alwaysTrue)]), 'ru': lambda v: dh_centuryBC(v, u'%R век до н. э.'), 'scn': lambda v: dh_centuryBC(v, u'Sèculu %R a.C.'), 'sk': lambda v: dh_centuryBC(v, u'%d. storočie pred Kr.'), 'sl': lambda v: dh_centuryBC(v, u'%d. stoletje pr. n. št.'), 'sq': lambda v: dh_centuryBC(v, u'Shekulli %R p.e.s.'), 'sr': lambda v: dh_centuryBC(v, u'%d. 
век пне.'), 'sv': lambda v: dh(v, u'%d00-talet f.Kr.', lambda i: i - 1, lambda ii: ii[0] + 1), 'tr': lambda v: dh_centuryBC(v, u'MÖ %d. yüzyıl'), 'tt': lambda v: dh_centuryBC(v, u'MA %d. yöz'), 'uk': lambda v: dh_centuryBC(v, u'%d століття до Р.Х.'), 'zh': lambda m: multi(m, [ (lambda v: dh_centuryBC(v, u'前%d世纪'), lambda p: p < 4), (lambda v: dh_centuryBC(v, u'前%d世紀'), alwaysTrue)]), }, 'CenturyAD_Cat': { 'cs': lambda v: dh_centuryAD(v, u'%d. století'), 'da': lambda v: dh_centuryAD(v, u'%d. århundrede'), 'no': lambda v: dh(v, u'%d-tallet', lambda i: (i - 1) * 100, lambda ii: ii[0] // 100 + 1), }, 'CenturyBC_Cat': { 'cs': lambda v: dh_centuryBC(v, u'%d. století př. n. l.'), 'de': lambda v: dh_centuryBC(v, u'Jahr (%d. Jh. v. Chr.)'), 'no': lambda v: dh(v, u'%d-tallet f.Kr.', lambda i: (i - 1) * 100, lambda ii: ii[0] // 100 + 1), }, 'MillenniumAD': { 'bg': lambda v: dh_millenniumAD(v, u'%d хилядолетие'), 'ca': lambda v: dh_millenniumAD(v, u'Mil·lenni %R'), 'cs': lambda v: dh_millenniumAD(v, u'%d. tisíciletí'), 'de': lambda v: dh_millenniumAD(v, u'%d. 
Jahrtausend'), 'el': lambda v: dh_millenniumAD(v, u'%dη χιλιετία'), 'en': lambda m: multi(m, [ (lambda v: dh_millenniumAD(v, u'%dst millennium'), lambda p: p == 1 or (p > 20 and p % 10 == 1)), (lambda v: dh_millenniumAD(v, u'%dnd millennium'), lambda p: p == 2 or (p > 20 and p % 10 == 2)), (lambda v: dh_millenniumAD(v, u'%drd millennium'), lambda p: p == 3 or (p > 20 and p % 10 == 3)), (lambda v: dh_millenniumAD(v, u'%dth millennium'), alwaysTrue)]), 'es': lambda v: dh_millenniumAD(v, u'%R milenio'), 'fa': lambda v: dh_millenniumAD(v, u'هزاره %R (میلادی)'), 'fi': lambda m: multi(m, [ (lambda v: dh_constVal(v, 1, u'Ensimmäinen vuosituhat'), lambda p: p == 1), (lambda v: dh_constVal(v, 2, u'Toinen vuosituhat'), lambda p: p == 2), (lambda v: dh_constVal(v, 3, u'Kolmas vuosituhat'), lambda p: p == 3), (lambda v: dh_constVal(v, 4, u'Neljäs vuosituhat'), lambda p: p == 4), (lambda v: dh_constVal(v, 5, u'Viides vuosituhat'), lambda p: p == 5), (lambda v: dh(v, u'%d000-vuosituhat', lambda i: i - 1, lambda ii: ii[0] + 1), alwaysTrue)]), 'fr': lambda m: multi(m, [ (lambda v: dh_millenniumAD(v, u'%Rer millénaire'), lambda p: p == 1), (lambda v: dh_millenniumAD(v, u'%Re millénaire'), alwaysTrue)]), 'he': lambda m: multi(m, [ (lambda v: dh_millenniumAD(v, u'האלף הראשון %d'), lambda p: p == 1), (lambda v: dh_millenniumAD(v, u'האלף השני %d'), lambda p: p == 2), (lambda v: dh_millenniumAD(v, u'האלף השלישי %d'), lambda p: p == 3), (lambda v: dh_millenniumAD(v, u'האלף הרביעי %d'), lambda p: p == 4), (lambda v: dh_millenniumAD(v, u'האלף החמישי %d '), lambda p: p == 5), (lambda v: dh_millenniumAD(v, u'האלף השישי %d'), lambda p: p == 6), (lambda v: dh_millenniumAD(v, u'האלף השביעי %d'), lambda p: p == 7), (lambda v: dh_millenniumAD(v, u'האלף השמיני %d'), lambda p: p == 8), (lambda v: dh_millenniumAD(v, u'האלף התשיעי %d'), lambda p: p == 9), (lambda v: dh_millenniumAD(v, u'האלף העשירי %d'), lambda p: p == 10), (lambda v: dh_millenniumAD(v, u'האלף ה־%d'), alwaysTrue)]), 'hu': lambda v: 
dh_millenniumAD(v, u'%d. évezred'), 'it': lambda v: dh_millenniumAD(v, u'%R millennio'), 'ja': lambda v: dh_millenniumAD(v, u'%d千年紀'), 'ka': lambda v: dh_millenniumAD(v, u'%R ათასწლეული'), 'ksh': lambda m: multi(m, [ (lambda v: dh_constVal(v, 1, u'Eetße Johdousend'), lambda p: p == 1), (lambda v: dh_constVal(v, 2, u'Zweijte Johdousend'), lambda p: p == 2), (lambda v: dh_constVal(v, 3, u'Drette Johdousend'), lambda p: p == 3), (lambda v: dh_constVal(v, 4, u'Veete Johdousend'), lambda p: p == 4), (lambda v: dh_constVal(v, 5, u'Föfte Johdousend'), lambda p: p == 5), (lambda v: dh_millenniumAD(v, u'%d. Johdousend'), alwaysTrue)]), 'lb': lambda v: dh_millenniumAD(v, u'%d. Joerdausend'), 'mhr': lambda v: dh_millenniumAD(v, u'%R. курым — '), 'lt': lambda v: dh_millenniumAD(v, u'%d tūkstantmetis'), 'pt': lambda v: slh(v, [ u'Primeiro milénio d.C.', u'Segundo milénio d.C.', u'Terceiro milénio d.C.', u'Quarto milénio d.C.']), 'ro': lambda v: slh(v, [u'Mileniul I', u'Mileniul al II-lea', u'Mileniul III']), 'ru': lambda v: dh_millenniumAD(v, u'%d тысячелетие'), 'sk': lambda v: dh_millenniumAD(v, u'%d. tisícročie'), 'sl': lambda v: dh_millenniumAD(v, u'%d. tisočletje'), 'sv': lambda v: dh(v, u'%d000-talet (millennium)', lambda i: i - 1, lambda ii: ii[0] + 1), 'tt': lambda v: dh_millenniumAD(v, u'%d. meñyıllıq'), 'ur': lambda m: multi(m, [ (lambda v: dh_constVal(v, 0, u'0000مبم'), lambda p: p == 0), (lambda v: dh_millenniumAD(v, u'%d000مبم'), alwaysTrue)]), }, 'MillenniumBC': { 'bg': lambda v: dh_millenniumBC(v, u'%d хилядолетие пр.н.е.'), 'ca': lambda v: dh_millenniumBC(v, u'Mil·lenni %R aC'), 'cs': lambda v: dh_millenniumBC(v, u'%d. tisíciletí př. n. l.'), 'da': lambda v: dh_millenniumBC(v, u'%d. årtusinde f.Kr.'), 'de': lambda v: dh_millenniumBC(v, u'%d. Jahrtausend v. Chr.'), 'el': lambda v: dh_millenniumBC(v, u'%dη χιλιετία π.Χ.'), 'en': lambda v: dh_millenniumBC(v, u'%dst millennium BC'), 'es': lambda v: dh_millenniumBC(v, u'%R milenio adC'), 'fi': lambda m: multi(m,
<reponame>yinquan529/platform-external-chromium-trace #!/usr/bin/env python # # Copyright 2011 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Methods for checking JS files for common style guide violations. These style guide violations should only apply to JavaScript and not an Ecma scripting languages. """ __author__ = ('<EMAIL> (<NAME>)', '<EMAIL> (<NAME>)', '<EMAIL> (<NAME>)') import re from sets import Set from closure_linter import ecmalintrules from closure_linter import error_check from closure_linter import errors from closure_linter import javascripttokenizer from closure_linter import javascripttokens from closure_linter import requireprovidesorter from closure_linter import tokenutil from closure_linter.common import error from closure_linter.common import position # Shorthand Error = error.Error Position = position.Position Rule = error_check.Rule Type = javascripttokens.JavaScriptTokenType class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules): """JavaScript lint rules that catch JavaScript specific style errors.""" def __init__(self, namespaces_info): """Initializes a JavaScriptLintRules instance.""" ecmalintrules.EcmaScriptLintRules.__init__(self) self._namespaces_info = namespaces_info self._declared_private_member_tokens = {} self._declared_private_members = Set() self._used_private_members = Set() def HandleMissingParameterDoc(self, token, param_name): """Handle errors associated with a parameter missing 
a param tag.""" self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION, 'Missing docs for parameter: "%s"' % param_name, token) def __ContainsRecordType(self, token): """Check whether the given token contains a record type. Args: token: The token being checked Returns: True if the token contains a record type, False otherwise. """ # If we see more than one left-brace in the string of an annotation token, # then there's a record type in there. return ( token and token.type == Type.DOC_FLAG and token.attached_object.type is not None and token.attached_object.type.find('{') != token.string.rfind('{')) def CheckToken(self, token, state): """Checks a token, given the current parser_state, for warnings and errors. Args: token: The current token under consideration state: parser_state object that indicates the current state in the page """ if self.__ContainsRecordType(token): # We should bail out and not emit any warnings for this annotation. # TODO(nicksantos): Support record types for real. state.GetDocComment().Invalidate() return # Call the base class's CheckToken function. super(JavaScriptLintRules, self).CheckToken(token, state) # Store some convenience variables namespaces_info = self._namespaces_info if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS): # Find all assignments to private members. if token.type == Type.SIMPLE_LVALUE: identifier = token.string if identifier.endswith('_') and not identifier.endswith('__'): doc_comment = state.GetDocComment() suppressed = (doc_comment and doc_comment.HasFlag('suppress') and doc_comment.GetFlag('suppress').type == 'underscore') if not suppressed: # Look for static members defined on a provided namespace. namespace = namespaces_info.GetClosurizedNamespace(identifier) provided_namespaces = namespaces_info.GetProvidedNamespaces() # Skip cases of this.something_.somethingElse_. 
regex = re.compile('^this\.[a-zA-Z_]+$') if namespace in provided_namespaces or regex.match(identifier): variable = identifier.split('.')[-1] self._declared_private_member_tokens[variable] = token self._declared_private_members.add(variable) elif not identifier.endswith('__'): # Consider setting public members of private members to be a usage. for piece in identifier.split('.'): if piece.endswith('_'): self._used_private_members.add(piece) # Find all usages of private members. if token.type == Type.IDENTIFIER: for piece in token.string.split('.'): if piece.endswith('_'): self._used_private_members.add(piece) if token.type == Type.DOC_FLAG: flag = token.attached_object if flag.flag_type == 'param' and flag.name_token is not None: self._CheckForMissingSpaceBeforeToken( token.attached_object.name_token) if (error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER) and flag.type is not None and flag.name is not None): # Check for optional marker in type. if (flag.type.endswith('=') and not flag.name.startswith('opt_')): self._HandleError(errors.JSDOC_MISSING_OPTIONAL_PREFIX, 'Optional parameter name %s must be prefixed ' 'with opt_.' % flag.name, token) elif (not flag.type.endswith('=') and flag.name.startswith('opt_')): self._HandleError(errors.JSDOC_MISSING_OPTIONAL_TYPE, 'Optional parameter %s type must end with =.' % flag.name, token) if flag.flag_type in state.GetDocFlag().HAS_TYPE: # Check for both missing type token and empty type braces '{}' # Missing suppress types are reported separately and we allow enums # without types. 
if (flag.flag_type not in ('suppress', 'enum') and (not flag.type or flag.type.isspace())): self._HandleError(errors.MISSING_JSDOC_TAG_TYPE, 'Missing type in %s tag' % token.string, token) elif flag.name_token and flag.type_end_token and tokenutil.Compare( flag.type_end_token, flag.name_token) > 0: self._HandleError( errors.OUT_OF_ORDER_JSDOC_TAG_TYPE, 'Type should be immediately after %s tag' % token.string, token) elif token.type == Type.DOUBLE_QUOTE_STRING_START: next_token = token.next while next_token.type == Type.STRING_TEXT: if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search( next_token.string): break next_token = next_token.next else: self._HandleError( errors.UNNECESSARY_DOUBLE_QUOTED_STRING, 'Single-quoted string preferred over double-quoted string.', token, Position.All(token.string)) elif token.type == Type.END_DOC_COMMENT: doc_comment = state.GetDocComment() # When @externs appears in a @fileoverview comment, it should trigger # the same limited doc checks as a special filename like externs.js. if doc_comment.HasFlag('fileoverview') and doc_comment.HasFlag('externs'): self._SetLimitedDocChecks(True) if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL) and not self._is_html and state.InTopLevel() and not state.InBlock()): # Check if we're in a fileoverview or constructor JsDoc. is_constructor = ( doc_comment.HasFlag('constructor') or doc_comment.HasFlag('interface')) is_file_overview = doc_comment.HasFlag('fileoverview') # If the comment is not a file overview, and it does not immediately # precede some code, skip it. # NOTE: The tokenutil methods are not used here because of their # behavior at the top of a file. next_token = token.next if (not next_token or (not is_file_overview and next_token.type in Type.NON_CODE_TYPES)): return # Don't require extra blank lines around suppression of extra # goog.require errors. 
if (doc_comment.SuppressionOnly() and next_token.type == Type.IDENTIFIER and next_token.string in ['goog.provide', 'goog.require']): return # Find the start of this block (include comments above the block, unless # this is a file overview). block_start = doc_comment.start_token if not is_file_overview: token = block_start.previous while token and token.type in Type.COMMENT_TYPES: block_start = token token = token.previous # Count the number of blank lines before this block. blank_lines = 0 token = block_start.previous while token and token.type in [Type.WHITESPACE, Type.BLANK_LINE]: if token.type == Type.BLANK_LINE: # A blank line. blank_lines += 1 elif token.type == Type.WHITESPACE and not token.line.strip(): # A line with only whitespace on it. blank_lines += 1 token = token.previous # Log errors. error_message = False expected_blank_lines = 0 if is_file_overview and blank_lines == 0: error_message = 'Should have a blank line before a file overview.' expected_blank_lines = 1 elif is_constructor and blank_lines != 3: error_message = ( 'Should have 3 blank lines before a constructor/interface.') expected_blank_lines = 3 elif not is_file_overview and not is_constructor and blank_lines != 2: error_message = 'Should have 2 blank lines between top-level blocks.' expected_blank_lines = 2 if error_message: self._HandleError( errors.WRONG_BLANK_LINE_COUNT, error_message, block_start, Position.AtBeginning(), expected_blank_lines - blank_lines) elif token.type == Type.END_BLOCK: if state.InFunction() and state.IsFunctionClose(): is_immediately_called = (token.next and token.next.type == Type.START_PAREN) function = state.GetFunction() if not self._limited_doc_checks: if (function.has_return and function.doc and not is_immediately_called and not function.doc.HasFlag('return') and not function.doc.InheritsDocumentation() and not function.doc.HasFlag('constructor')): # Check for proper documentation of return value. 
self._HandleError( errors.MISSING_RETURN_DOCUMENTATION, 'Missing @return JsDoc in function with non-trivial return', function.doc.end_token, Position.AtBeginning()) elif (not function.has_return and not function.has_throw and function.doc and function.doc.HasFlag('return') and not state.InInterfaceMethod()): return_flag = function.doc.GetFlag('return') if (return_flag.type is None or ( 'undefined' not in return_flag.type and 'void' not in return_flag.type and '*' not in return_flag.type)): self._HandleError( errors.UNNECESSARY_RETURN_DOCUMENTATION, 'Found @return JsDoc on function that returns nothing', return_flag.flag_token, Position.AtBeginning()) if state.InFunction() and state.IsFunctionClose(): is_immediately_called = (token.next and token.next.type == Type.START_PAREN) if (function.has_this and function.doc and not function.doc.HasFlag('this') and not function.is_constructor and not function.is_interface and '.prototype.' not in function.name): self._HandleError( errors.MISSING_JSDOC_TAG_THIS, 'Missing @this JsDoc in function referencing "this". (' 'this usually means you are trying to reference "this" in ' 'a static function, or you have forgotten to mark a ' 'constructor with @constructor)', function.doc.end_token, Position.AtBeginning()) elif token.type == Type.IDENTIFIER: if token.string == 'goog.inherits' and not state.InFunction(): if state.GetLastNonSpaceToken().line_number == token.line_number: self._HandleError( errors.MISSING_LINE, 'Missing newline between constructor and goog.inherits', token, Position.AtBeginning()) extra_space = state.GetLastNonSpaceToken().next while extra_space != token: if extra_space.type == Type.BLANK_LINE: self._HandleError( errors.EXTRA_LINE, 'Extra line between constructor and goog.inherits', extra_space) extra_space = extra_space.next # TODO(robbyw): Test the last function was a constructor. # TODO(robbyw): Test correct @extends and @implements documentation. 
elif (token.string == 'goog.provide' and not state.InFunction() and namespaces_info is not None): namespace = tokenutil.Search(token, Type.STRING_TEXT).string # Report extra goog.provide statement. if namespaces_info.IsExtraProvide(token): self._HandleError( errors.EXTRA_GOOG_PROVIDE, 'Unnecessary goog.provide: ' + namespace, token, position=Position.AtBeginning()) if namespaces_info.IsLastProvide(token): # Report missing provide statements after the last existing provide. missing_provides = namespaces_info.GetMissingProvides() if missing_provides: self._ReportMissingProvides( missing_provides, tokenutil.GetLastTokenInSameLine(token).next, False) # If there are no require statements, missing requires should be # reported after the last provide. if not namespaces_info.GetRequiredNamespaces(): missing_requires = namespaces_info.GetMissingRequires() if missing_requires: self._ReportMissingRequires( missing_requires, tokenutil.GetLastTokenInSameLine(token).next, True)
# -*- coding: utf-8 -*- import functools import json from datetime import datetime from typing import Any, List from base64 import b64encode import jwt import requests.exceptions import urllib3 from requests import Response, Session from requests.utils import quote from requests.adapters import HTTPAdapter from yapconf import YapconfSpec import brewtils.plugin from brewtils.errors import _deprecate from brewtils.rest import normalize_url_prefix from brewtils.specification import _CONNECTION_SPEC def enable_auth(method): """Decorate methods with this to enable using authentication""" @functools.wraps(method) def wrapper(self, *args, **kwargs): # Proactively refresh access token, if possible try: if self.access_token and self.refresh_token: now = datetime.utcnow() decoded = jwt.decode(self.access_token, verify=False) issued = datetime.utcfromtimestamp(int(decoded["iat"])) expires = datetime.utcfromtimestamp(int(decoded["exp"])) # Try to refresh there's less than 10% time remaining if (expires - now) < (0.1 * (expires - issued)): self.refresh() except Exception: pass original_response = method(self, *args, **kwargs) if original_response.status_code != 401: return original_response # Try to use the refresh token if self.refresh_token: refresh_response = self.refresh() if refresh_response.ok: return method(self, *args, **kwargs) # Try to use credentials if self.username and self.password: credential_response = self.get_tokens() if credential_response.ok: return method(self, *args, **kwargs) # Nothing worked, just return the original response return original_response return wrapper class TimeoutAdapter(HTTPAdapter): """Transport adapter with a default request timeout""" def __init__(self, **kwargs): self.timeout = kwargs.pop("timeout", None) super(TimeoutAdapter, self).__init__(**kwargs) def send(self, *args, **kwargs): """Sends PreparedRequest object with specified timeout.""" kwargs["timeout"] = kwargs.get("timeout") or self.timeout return super(TimeoutAdapter, 
def __init__(self, *args, **kwargs):
    """Initialize the client: resolve config, build the session and URLs.

    Args:
        *args (deprecated): bg_host and bg_port as positional arguments
        **kwargs: Connection arguments (see the class docstring)

    Raises:
        ValueError: If the configured API version is not supported
    """
    self._config = self._load_config(args, kwargs)

    self.bg_host = self._config.bg_host
    self.bg_port = self._config.bg_port
    self.bg_prefix = self._config.bg_url_prefix
    self.api_version = self._config.api_version
    self.username = self._config.username
    # Fixed: this line was corrupted placeholder text; every other
    # attribute here is read straight from the resolved config.
    self.password = self._config.password
    self.access_token = self._config.access_token
    self.refresh_token = self._config.refresh_token

    # Configure the session to use when making requests
    self.session = Session()

    if self._config.proxy:
        if self._config.ssl_enabled:
            self.session.proxies.update({"https": self._config.proxy})
        else:
            self.session.proxies.update({"http": self._config.proxy})

    # This is what Requests is expecting
    if self._config.client_key:
        self.session.cert = (self._config.client_cert, self._config.client_key)
    else:
        self.session.cert = self._config.client_cert

    if not self._config.ca_verify:
        urllib3.disable_warnings()
        self.session.verify = False
    elif self._config.ca_cert:
        self.session.verify = self._config.ca_cert

    client_timeout = self._config.client_timeout
    if client_timeout == -1:
        client_timeout = None

    # Having two is kind of strange to me, but this is what Requests does
    self.session.mount("https://", TimeoutAdapter(timeout=client_timeout))
    self.session.mount("http://", TimeoutAdapter(timeout=client_timeout))

    # Configure the beer-garden URLs
    self.base_url = "%s://%s:%s%s" % (
        "https" if self._config.ssl_enabled else "http",
        self.bg_host,
        self.bg_port,
        normalize_url_prefix(self.bg_prefix),
    )
    self.version_url = self.base_url + "version"
    self.config_url = self.base_url + "config"

    if self.api_version == 1:
        self.garden_url = self.base_url + "api/v1/gardens/"
        self.system_url = self.base_url + "api/v1/systems/"
        self.instance_url = self.base_url + "api/v1/instances/"
        self.command_url = self.base_url + "api/v1/commands/"
        self.request_url = self.base_url + "api/v1/requests/"
        self.queue_url = self.base_url + "api/v1/queues/"
        self.logging_url = self.base_url + "api/v1/logging/"
        self.job_url = self.base_url + "api/v1/jobs/"
        self.job_export_url = self.base_url + "api/v1/export/jobs/"
        self.job_import_url = self.base_url + "api/v1/import/jobs/"
        self.token_url = self.base_url + "api/v1/tokens/"
        self.user_url = self.base_url + "api/v1/users/"
        self.admin_url = self.base_url + "api/v1/admin/"
        self.forward_url = self.base_url + "api/v1/forward"

        # Deprecated
        self.logging_config_url = self.base_url + "api/v1/config/logging/"

        # Beta
        self.event_url = self.base_url + "api/vbeta/events/"
        self.chunk_url = self.base_url + "api/vbeta/chunks/"
        self.file_url = self.base_url + "api/vbeta/file/"
    else:
        raise ValueError("Invalid Beer-garden API version: %s" % self.api_version)
raise ValueError("Invalid Beer-garden API version: %s" % self.api_version) @staticmethod def _load_config(args, kwargs): """Load a config based on the CONNECTION section of the Brewtils Specification This will load a configuration with the following source precedence: 1. kwargs 2. kwargs with "old" names ("host", "port", "url_prefix") 3. host and port passed as positional arguments 4. the global configuration (brewtils.plugin.CONFIG) Args: args (deprecated): host and port kwargs: Standard connection arguments to be used Returns: The resolved configuration object """ spec = YapconfSpec(_CONNECTION_SPEC) renamed = {} for key in ["host", "port", "url_prefix"]: if kwargs.get(key): renamed["bg_" + key] = kwargs.get(key) positional = {} if len(args) > 0: _deprecate( "Heads up - passing bg_host as a positional argument is deprecated " "and will be removed in version 4.0", stacklevel=kwargs.get("stacklevel", 3), ) positional["bg_host"] = args[0] if len(args) > 1: _deprecate( "Heads up - passing bg_port as a positional argument is deprecated " "and will be removed in version 4.0", stacklevel=kwargs.get("stacklevel", 3), ) positional["bg_port"] = args[1] return spec.load_config(*[kwargs, renamed, positional, brewtils.plugin.CONFIG]) def can_connect(self, **kwargs): # type: (**Any) -> bool """Determine if a connection to the Beer-garden server is possible Args: **kwargs: Keyword arguments to pass to Requests session call Returns: A bool indicating if the connection attempt was successful. Will return False only if a ConnectionError is raised during the attempt. Any other exception will be re-raised. Raises: requests.exceptions.RequestException: The connection attempt resulted in an exception that indicates something other than a basic connection error. For example, an error with certificate verification. 
"""
try:
    self.session.get(self.config_url, **kwargs)
except requests.exceptions.ConnectionError as ex:
    # Exact-type check on purpose: ConnectionError subclasses (e.g. proxy or
    # SSL-related errors) signal something other than "server unreachable"
    # and are re-raised for the caller, per the docstring contract above.
    if type(ex) == requests.exceptions.ConnectionError:
        return False
    raise

return True

@enable_auth
def get_version(self, **kwargs):
    # type: (**Any) -> Response
    """Perform a GET to the version URL

    Args:
        **kwargs (deprecated): Unused. Accepted for compatibility.

    Returns:
        Requests Response object
    """
    if kwargs:
        _deprecate(
            "Keyword arguments for get_version are no longer used and will be "
            "removed in a future release."
        )
    return self.session.get(self.version_url)

@enable_auth
def get_config(self, **kwargs):
    # type: (**Any) -> Response
    """Perform a GET to the config URL

    Args:
        **kwargs (deprecated): Unused. Accepted for compatibility.

    Returns:
        Requests Response object
    """
    if kwargs:
        _deprecate(
            "Keyword arguments for get_config are no longer used and will be "
            "removed in a future release."
        )
    return self.session.get(self.config_url)

@enable_auth
def get_logging_config(self, **kwargs):
    # type: (**Any) -> Response
    """Perform a GET to the logging config URL

    Args:
        **kwargs: Query parameters to be used in the GET request

    Returns:
        Requests Response object
    """
    return self.session.get(self.logging_url, params=kwargs)

@enable_auth
def get_garden(self, garden_name, **kwargs):
    # type: (str, **Any) -> Response
    """Performs a GET on the Garden URL

    Args:
        garden_name: Name of garden to retrieve
        **kwargs: Query parameters to be used in the GET request

    Returns:
        Requests Response object
    """
    # quote will URL encode the Garden name
    return self.session.get(self.garden_url + quote(garden_name), params=kwargs)

@enable_auth
def post_gardens(self, payload):
    # type: (str) -> Response
    """Performs a POST on the Garden URL

    Args:
        payload: New Garden definition

    Returns:
        Requests Response object
    """
    return self.session.post(
        self.garden_url, data=payload, headers=self.JSON_HEADERS
    )

@enable_auth
def delete_garden(self, garden_name):
    # type: (str) -> Response
    """Performs a DELETE on a Garden URL

    Args:
        garden_name: Name of Garden to delete

    Returns:
        Requests Response object
    """
    # quote will URL encode the Garden name
    return self.session.delete(self.garden_url + quote(garden_name))

@enable_auth
def get_systems(self, **kwargs):
    # type: (**Any) -> Response
    """Perform a GET on the System collection URL

    Args:
        **kwargs: Query parameters to be used in the GET request

    Returns:
        Requests Response object
    """
    return self.session.get(self.system_url, params=kwargs)

@enable_auth
def get_system(self, system_id, **kwargs):
    # type: (str, **Any) -> Response
    """Performs a GET on the System URL

    Args:
        system_id: System ID
        **kwargs: Query parameters to be used in the GET request

    Returns:
        Requests Response object
    """
    # NOTE(review): unlike get_garden, system_id is not URL-quoted here;
    # presumably IDs are always URL-safe hex strings -- confirm.
    return self.session.get(self.system_url + system_id, params=kwargs)

@enable_auth
def post_systems(self, payload):
    # type: (str) -> Response
    """Performs a POST on the System URL

    Args:
        payload: New System definition

    Returns:
        Requests Response object
    """
    return self.session.post(
        self.system_url, data=payload, headers=self.JSON_HEADERS
    )

@enable_auth
def patch_system(self, system_id, payload):
    # type: (str, str) -> Response
    """Performs a PATCH on a System URL

    Args:
        system_id: System ID
        payload: Serialized PatchOperation

    Returns:
        Requests Response object
    """
    return self.session.patch(
        self.system_url + str(system_id),
stopping behaviour.") early_stopping_kwargs = { "early_stopping_metric": "elbo", "save_best_state_metric": "elbo", "patience": 50, "threshold": 0, "reduce_lr_on_plateau": True, "lr_patience": 25, "lr_factor": 0.2, } trainer_specific_kwargs["early_stopping_kwargs"] = early_stopping_kwargs # add elbo to metrics to monitor metrics_to_monitor = trainer_specific_kwargs.get("metrics_to_monitor", []) metrics_to_monitor.append("elbo") trainer_specific_kwargs["metrics_to_monitor"] = metrics_to_monitor # default search space if space is None: logger.debug("Using default parameter search space.") space = { "model_tunable_kwargs": { "n_latent": 5 + hp.randint("n_latent", 11), # [5, 15] "n_hidden": hp.choice("n_hidden", [64, 128, 256]), "n_layers": 1 + hp.randint("n_layers", 5), "dropout_rate": hp.choice("dropout_rate", [0.1, 0.3, 0.5, 0.7, 0.9]), "reconstruction_loss": hp.choice("reconstruction_loss", ["zinb", "nb"]), }, "train_func_tunable_kwargs": { "lr": hp.choice("lr", [0.01, 0.005, 0.001, 0.0005, 0.0001]) }, } logger.info( "Fixed parameters: \n" "model: \n" + str(model_specific_kwargs) + "\n" + "trainer: \n" + str(trainer_specific_kwargs) + "\n" + "train method: \n" + str(train_func_specific_kwargs) ) # build a partial objective function restricted to the search space if objective_hyperopt is None: objective_hyperopt = partial( _objective_function, **{ "gene_dataset": gene_dataset, "model_class": model_class, "trainer_class": trainer_class, "model_specific_kwargs": model_specific_kwargs, "trainer_specific_kwargs": trainer_specific_kwargs, "train_func_specific_kwargs": train_func_specific_kwargs, "use_batches": use_batches, }, ) if parallel: logger.info("Starting parallel hyperoptimization") trials = _auto_tune_parallel( objective_hyperopt=objective_hyperopt, exp_key=exp_key, space=space, max_evals=max_evals, save_path=save_path, n_cpu_workers=n_cpu_workers, gpu_ids=gpu_ids, n_workers_per_gpu=n_workers_per_gpu, reserve_timeout=reserve_timeout, fmin_timeout=fmin_timeout, 
fmin_timer=fmin_timer, mongo_port=mongo_port, mongo_host=mongo_host, db_name=db_name, multiple_hosts=multiple_hosts, ) else: logger.info("Starting sequential hyperoptimization") trials = Trials() # run hyperoptimization _ = fmin( fn=objective_hyperopt, space=space, algo=tpe.suggest, max_evals=max_evals, trials=trials, ) # return best model, trained if train_best: logger.debug("Training best model with full training set") best_space = trials.best_trial["result"]["space"] best_trainer = objective_hyperopt(best_space, is_best_training=True) if pickle_result: if train_best: logger.debug("Pickling best model and trainer") # pickle trainer and save model (overkill?) with open( os.path.join(save_path, "best_trainer_{key}".format(key=exp_key)), "wb" ) as f: pickle.dump(best_trainer, f) torch.save( best_trainer.model.state_dict(), os.path.join(save_path, "best_model_{key}".format(key=exp_key)), ) # remove object containing thread.lock (otherwise pickle.dump throws) logger.debug("Pickling Trials object") if hasattr(trials, "handle"): del trials.handle with open( os.path.join(save_path, "trials_{key}".format(key=exp_key)), "wb" ) as f: pickle.dump(trials, f) # remove added logging handlers/formatters _cleanup_logger() if train_best: return best_trainer, trials else: return trials def _auto_tune_parallel( objective_hyperopt: Callable, exp_key: str, space: dict = None, max_evals: int = 100, save_path: str = ".", n_cpu_workers: int = None, gpu_ids: List[int] = None, n_workers_per_gpu: int = 1, reserve_timeout: float = 30.0, fmin_timeout: float = 60.0, fmin_timer: float = None, mongo_port: str = "1234", mongo_host: str = "localhost", db_name: str = "scvi_db", multiple_hosts: bool = False, ) -> MongoTrials: """Parallel version of the hyperoptimization procedure. Called by ``auto_tune_scvi_model`` when ``parallel=True``. Specifically, first the MongoDb service is launched in its own forked process. Then, the call to the minimization process is made in its own forked process. 
Then, the call ``worker_launcher`` is made in its own Thread. After that, the program waits for either the minimization process to finish or for the workers to all timeout. When one of these conditions is verified the program kills the waiter for the other and tries to dequeue the results from the minimization process. At that point, if ``multiple_hosts`` is set to True, the program waits indefinitely for the minimization process to put the results in the queue. If not, the minimisation process has ``fmin_timeout`` seconds to finish. This mechanism ensures that the program does not hang if, for any reason, the workers die before completing all the jobs. Note that logs to the ``hyperopt`` package are automatically stored in ``./hyperopt_logfile.txt``. Note that the progress bar is automatically disabled if the logging level for ``scvi.inference.autotune`` is lower than logging.WARNING. :param objective_hyperopt: Callable, the objective function to minimize :param exp_key: Name of the experiment in MongoDb. :param space: ``dict`` containing up to three sub-dicts with keys "model_tunable_kwargs", "trainer_tunable_kwargs" or "train_func_tunable_kwargs". Each of those dict contains ``hyperopt`` defined parameter spaces (e.g. ``hp.choice(..)``) which will be passed to the corresponding object : model, trainer or train method when performing hyperoptimization. Default: mutable, see source code. :param max_evals: Maximum number of evaluations of the objective. :param save_path: Path where to save best model, trainer, trials and mongo files. :param n_cpu_workers: Number of cpu workers to launch. If None, and no GPUs are found, defaults to ``os.cpucount() - 1``. Else, defaults to 0. :param gpu_ids: Ids of the GPUs to use. If None defaults to all GPUs found by ``torch``. Note that considered gpu ids are int from ``0`` to ``torch.cuda.device_count()``. :param n_workers_per_gpu: Number of workers ton launch per gpu found by ``torch``. 
:param reserve_timeout: Amount of time, in seconds, a worker tries to reserve a job for before throwing a ``ReserveTimeout`` Exception. :param fmin_timeout: Amount of time, in seconds, ``fmin_process`` has to terminate after all workers have died - before throwing a ``FminTimeoutError``. If ``multiple_hosts`` is set to ``True``, this is set to None to disable the timineout behaviour. :param fmin_timer: Global amount of time allowed for fmin_process. If not None, the minimization procedure will be stopped after ``fmin_timer`` seconds. Used only if ``parallel`` is set to ``True``. :param mongo_port: Port to the mongo db. :param mongo_host: Hostname used with mongo_port to indicate the prefix of the mongodb address. The prefix of the address passed onto the workers and MongoTrials object is ``'{mongo_host}:{mongo_port}'``. :param db_name: Name to use when creating the Mongo database. Suffix of the mongo address. :param multiple_hosts: If ``True``, user is considered to have workers launched on several machines. Therefore, setting this to ``True`` disables the ``fmin_timeout`` behaviour. :return: ``MongoTrials`` object containing the results of the program. 
""" # run mongod bash script mongo_path = os.path.join(save_path, "mongo") if not os.path.exists(mongo_path): os.makedirs(mongo_path) mongo_logfile = open(os.path.join(mongo_path, "mongo_logfile.txt"), "w") open_files.append(mongo_logfile) logger.debug( "Starting MongoDb process, logs redirected to " "{name}.".format(name=mongo_logfile.name) ) mongod_process = Popen( [ "mongod", "--quiet", "--dbpath={path}".format(path=mongo_path), "--port={port}".format(port=mongo_port), ], stdout=mongo_logfile, ) mongo_port_address = os.path.join(mongo_host + ":" + mongo_port, db_name) started_processes.append(mongod_process) # log hyperopt to file hp_logger = logging.getLogger("hyperopt") fh_hyperopt = logging.FileHandler(os.path.join(save_path, "hyperopt_logfile.txt")) fh_hyperopt.setFormatter(formatter) hp_logger.addHandler(fh_hyperopt) # add progress handler to progress logger progress_logger = logging.getLogger("progress_logger") disable = multiple_hosts or (logger.level < logging.WARNING) pbar = tqdm.tqdm(total=max_evals, disable=disable) progress_logger.addHandler(ProgressHandler(pbar=pbar, disable=disable)) # start by running fmin process so that workers don't timeout # run hyperoptimization, in a forked process # this allows to warn if the workers crash # since mongo is not thread-safe, trials must be instantiated in each child logger.debug("Starting minimization procedure") queue = fork_ctx.Queue() fmin_kwargs = { "queue": queue, "fn": objective_hyperopt, "exp_key": exp_key, "space": space, "algo": tpe.suggest, "max_evals": max_evals, "fmin_timer": fmin_timer, "show_progressbar": False, # progbar useless in parallel mode "mongo_port_address": mongo_port_address, } fmin_process = fork_ctx.Process( target=_fmin_parallel, kwargs=fmin_kwargs, name="fmin Process" ) fmin_process.start() started_processes.append(fmin_process) # start worker launcher logger.debug("Starting worker launcher") stop_watchdog_event = threading.Event() launcher_kwargs = { "stop_watchdog_event": 
stop_watchdog_event, "exp_key": exp_key, "n_cpu_workers": n_cpu_workers, "gpu_ids": gpu_ids, "n_workers_per_gpu": n_workers_per_gpu, "reserve_timeout": reserve_timeout, "workdir": mongo_path, "mongo_port_address": mongo_port_address, "multiple_hosts": multiple_hosts, } workers_thread = threading.Thread( target=launch_workers, kwargs=launcher_kwargs, name="Worker Launcher" ) workers_thread.start() started_threads.append(workers_thread) # wait for workers and fmin process simultaneously workers_done_event = threading.Event() fmin_done_event = threading.Event() fmin_waiter = threading.Thread( target=_wait_for_process_or_thread, kwargs={"process": fmin_process, "event": fmin_done_event}, name="Waiter fmin", ) fmin_waiter.start() started_threads.append(fmin_waiter) workers_waiter = threading.Thread( target=_wait_for_process_or_thread, kwargs={"process": workers_thread, "event": workers_done_event}, name="Waiter workers", ) workers_waiter.start() started_threads.append(workers_waiter) while not workers_done_event.is_set() and not fmin_done_event.is_set(): time.sleep(5) # when one of them finishes, if it is fmin -> trials should be in the queue # if not and not using multiple hosts we wait fmin_timeout seconds for fmin to finish # in any case, close waiter threads if fmin_done_event.is_set(): logger.debug("Setting worker watchdog and waiter stop events.") stop_watchdog_event.set() workers_done_event.set() if workers_done_event.is_set() and not multiple_hosts: logger.debug("Setting fmin waiter stop event.") fmin_done_event.set() try: if multiple_hosts: # if using multiple_hosts, there could still be workers -> disable fmin timeout fmin_timeout = None logger.debug( "multiple_hosts set to True, fmin will block until all trials have been completed." 
) else: logger.debug( "multiple_hosts set to false, Fmin has {time} seconds to finish".format( time=fmin_timeout ) ) trials = queue.get(timeout=fmin_timeout) except Empty: logger.error( "Queue still empty {fmin_timeout} seconds after all workers have died." "\n".format(fmin_timeout=fmin_timeout) + "Terminating minimization process." ) raise FminTimeoutError( "Queue still empty {fmin_timeout} seconds after all workers " "have died. Check that you have used a new exp_key or allowed " "a higher max_evals".format(fmin_timeout=fmin_timeout) ) # sanity: wait for fmin, terminate workers and wait for launcher fmin_process.join() stop_watchdog_event.set() workers_thread.join() logger.info(
message
# Input
#   msg (unicode): string that contains the message for the calculation
#   fontpkg (fontpkg): the font to use for the calculation
#   varwidth (bool): Should the font be fixed or variable width

# Measure the pixel size msg would occupy when rendered, without drawing it.
maxw = 0
maxh = 0
cx = 0
(fx,fy) = fontpkg['size']

for c in msg:
    if c == u'\n':
        # Line break: bank the line height and track the widest line so far
        maxh = maxh + fy
        if cx > maxw:
            maxw = cx
        cx = 0
        continue
    try:
        charimg = fontpkg[ord(c)]
    except KeyError:
        # Requested character does not exist in font. Replace with '?'
        charimg = fontpkg[ord('?')]
    if varwidth:
        cx += charimg.size[0]
    else:
        cx += fx

if cx > maxw:
    maxw = cx
# Account for the final (unterminated) line
maxh = maxh + fy

return ((maxw, maxh))

def text(self, formatstring, variables, fontpkg, varwidth = True, specifiedsize=(0,0), just=u'left'):
    # Render the evaluated formatstring into a new 1-bit PIL image using a
    # bitmap fontpkg, one character image at a time.
    # Input
    #   formatstring (unicode) -- format string
    #   variables (unicode array) -- list of variables used to populate formatstring. Variable values come from variabledict.
    #   fontpkg (font object) -- The font that the message should be rendered in.
    #   varwidth (bool) -- Whether the font should be shown monospaced or with variable pitch
    #   specifiedsize (integer tuple) -- Size to make image if larger than what is required for the message size
    #   just (unicode) -- Determines how to justify the text horizontally.
    #       Allowed values [ 'left','right','center','centerchar' ]

    # Save variables used for this text widget
    self.currentvardict = { }
    for v in variables:
        try:
            jv = v.split(u'|')[0]
            self.currentvardict[jv] = self.variabledict[jv]
        except KeyError:
            logging.debug('Trying to save state of {0} but it was not found within database'.format(jv))
            pass

    # save parameters for future updates
    self.type = u'text'
    self.formatstring = formatstring
    self.variables = variables
    self.fontpkg = fontpkg
    self.varwidth = varwidth
    self.just = just
    self.specifiedsize = specifiedsize

    (fx,fy) = fontpkg['size']
    cx = 0
    cy = 0
    cw = 0  # NOTE(review): never read afterwards

    msg = self.evaltext(formatstring, variables)

    # initialize image
    if msg == '':
        msg = ' '
    maxw, maxh = self.textsize(msg, fontpkg, varwidth)

    # msglines = msg.split('\n')
    # maxw = 0
    # for line in msglines:
    #   if maxw < len(line):
    #     maxw = len(line)
    # maxw = maxw * fx
    # maxh = len(msglines) * fy

    # If a size was provided that is larger than what is required to display the text
    # expand the image size as appropriate
    width, height = specifiedsize
    maxw = maxw if maxw > width else width
    maxh = maxh if maxh > height else height

    self.image = Image.new("1", (maxw, maxh), 0)
    lineimage = Image.new("1", (maxw, fy), 0)
    for c in msg:
        # If newline, move y to next line (based upon font height) and return x to beginning of line
        if c == u'\n':
            # Place line into image
            if just == u'left':
                ax = 0
            elif just == u'center':
                # NOTE(review): '/' relies on Python 2 integer division (this
                # file uses `unicode`); on Python 3 this would yield a float
                # and Image.paste would reject it -- confirm before porting.
                ax = (maxw-cx)/2
            # if this is a character mode display then we need to be careful not to split a character across the character boundary
            elif just == u'centerchar':
                # If the number of chars is even, then we should be ok
                if cx % 2 == 0:
                    ax = (maxw-cx)/2
                else:
                    # If it's odd though we'll get split so add another character worth of space to the calculation
                    ax = (maxw-cx-fx)/2
            elif just == u'right':
                ax = (maxw-cx)
            self.image.paste(lineimage, (ax, cy))
            lineimage = Image.new("1", (maxw, fy), 0)
            cy = cy + fy
            cx = 0
            continue
        try:
            charimg = fontpkg[ord(c)]
        except KeyError:
            # Requested character does not exist in font. Replace with '?'
            charimg = fontpkg[ord('?')]

        # Adjust charimg if varwidth is False
        if not varwidth:
            offset = (fx-charimg.size[0])/2
            charimg = charimg.crop( (-offset,0,fx-offset,fy) )
            charimg.load()

        # Paste character into frame
        lineimage.paste(charimg, (cx,0))

        # Erase space between characters
        draw = ImageDraw.Draw(lineimage)
        draw.rectangle((cx+charimg.size[0],0, cx+charimg.size[0], fy-1),0)

        # Move to next character position
        if varwidth:
            cx += charimg.size[0]
        else:
            cx += fx

    # # resize to exact requirement of message
    # self.image.crop((0,0,cx-1, cy+fy))

    # Place last line into image
    if just == u'left':
        ax = 0
    elif just == u'center':
        ax = (maxw-cx)/2
    # if this is a character mode display then we need to be careful not to split a character across the character boundary
    elif just == u'centerchar':
        # If the number of chars is even, then we should be ok
        if cx % 2 == 0:
            ax = (maxw-cx)/2
        else:
            # If it's odd though we'll get split so add another character worth of space to the calculation
            ax = (maxw-cx-fx)/2
    elif just == u'right':
        ax = (maxw-cx)

    self.image.paste(lineimage, (ax, cy))

    self.updatesize()
    return self.image

def ttext(self, formatstring, variables, fontpkg, varwidth = True, specifiedsize=8, just=u'left'):
    # Render text using a PIL truetype font object (fontpkg) in a single
    # draw.text call, instead of pasting per-character bitmap images.
    # Input
    #   formatstring (unicode) -- format string
    #   variables (unicode array) -- list of variables used to populate formatstring. Variable values come from variabledict.
    #   fontpkg (font object) -- The font that the message should be rendered in.
    #   varwidth (bool) -- Whether the font should be shown monospaced or with variable pitch
    #   specifiedsize (integer tuple) -- Size to make image if larger than what is required for the message size
    #       NOTE(review): the default is the int 8, yet the body unpacks it as
    #       a tuple (width, height = specifiedsize) -- confirm callers always
    #       pass a tuple, otherwise the default raises TypeError.
    #   just (unicode) -- Determines how to justify the text horizontally.
#       Allowed values [ 'left','right','center','centerchar' ]

# Save variables used for this text widget
self.currentvardict = { }
for v in variables:
    try:
        jv = v.split(u'|')[0]
        self.currentvardict[jv] = self.variabledict[jv]
    except KeyError:
        logging.debug('Trying to save state of {0} but it was not found within database'.format(jv))
        pass

# save parameters for future updates
self.type = u'ttext'
self.formatstring = formatstring
self.variables = variables
self.fontpkg = fontpkg
self.varwidth = varwidth
self.just = just
self.specifiedsize = specifiedsize

msg = self.evaltext(formatstring, variables)

# initialize image
if msg == '':
    msg = ' '
# A truetype font can measure the whole string directly
maxw, maxh = self.fontpkg.getsize(msg)
cx = maxw
textimage = Image.new("1", (maxw, maxh), 0)

# msglines = msg.split('\n')
# maxw = 0
# for line in msglines:
#   if maxw < len(line):
#     maxw = len(line)
# maxw = maxw * fx
# maxh = len(msglines) * fy

# If a size was provided that is larger than what is required to display the text
# expand the image size as appropriate
width, height = specifiedsize
maxw = maxw if maxw > width else width
maxh = maxh if maxh > height else height

self.image = Image.new("1", (maxw, maxh), 0)

draw = ImageDraw.Draw(textimage)
draw.text( (0,0), msg, font=self.fontpkg, fill='white')
del draw

# # resize to exact requirement of message
# self.image.crop((0,0,cx-1, cy+fy))

# Place last line into image
if just == u'left':
    ax = 0
elif just == u'center':
    ax = (maxw-cx)/2
# if this is a character mode display then we need to be careful not to split a character across the character boundary
elif just == u'centerchar':
    # If the number of chars is even, then we should be ok
    if cx % 2 == 0:
        ax = (maxw-cx)/2
    else:
        # If it's odd though we'll get split so add another character worth of space to the calculation
        # NOTE(review): fx is never assigned in ttext (unlike text), so this
        # branch would raise NameError -- confirm 'centerchar' is never used
        # with truetype rendering.
        ax = (maxw-cx-fx)/2
elif just == u'right':
    ax = (maxw-cx)

self.image.paste(textimage, (ax, 0))

self.updatesize()
return self.image

# Image widget function
def imagewidget(self, image, size=(0,0)):
    # Copy a provided image into this widget, cropping/padding to `size`.
    # Input
    #   image (Image object)-- image to place within widget
    #   size (integer tuple) -- size of widget. If not provided then size will be the same as the provided image

    self.image = image.copy()
    self.type = 'image'

    width, height = size
    # If either dimension is empty, take the dimension from the actual image
    if not width:
        width = self.image.size[0]
    if not height:
        height = self.image.size[1]

    # Resize image (crop pads with background when the target is larger)
    self.image = self.image.crop( (0,0,width,height))
    self.updatesize()
    return self.image

# PROGRESSBAR widget function
def progressbar(self, value, rangeval, size, style=u'square'):
    # Draw a horizontal progress bar for `value` within `rangeval`.
    # Input
    #   value (numeric) -- Value of the variable showing progress.
    #   rangeval (numeric tuple) -- Range of possible values. Used to calculate percentage complete.
    #   size (number tuple) -- width and height to draw progress bar
    #   style (unicode) -- Sets the style of the progress bar. Allowed values [ 'rounded', 'square' ]
    #       NOTE(review): only 'square' is actually handled below; any other
    #       style (including the documented 'rounded') produces a blank image.

    self.variables = []

    # Convert variable to value if needed (a unicode value is treated as a
    # variable name to look up in variabledict; missing names fall back to 0)
    if type(value) is unicode:
        v = self.variabledict[value] if value in self.variabledict else 0
        if value in self.variabledict:
            self.variables.append(value)
    elif type(value) is int or type(value) is float:
        v = value
    else:
        v = 0

    l,h = rangeval
    # Convert range low to value if needed
    if type(l) is unicode:
        rvlow = self.variabledict[l] if l in self.variabledict else 0
        if l in self.variabledict:
            self.variables.append(l)
    elif type(l) is int or type(l) is float:
        rvlow = l
    else:
        rvlow = 0

    # Convert range high to value if needed
    if type(h) is unicode:
        rvhigh = self.variabledict[h] if h in self.variabledict else 0
        if h in self.variabledict:
            self.variables.append(h)
    elif type(h) is int or type(h) is float:
        rvhigh = h
    else:
        rvhigh = 0

    # Save variables used for this progressbar widget
    self.currentvardict = { }
    for sv in self.variables:
        self.currentvardict[sv] = self.variabledict[sv]

    width, height = size

    # correct values if needed (swap an inverted range, clamp v into it)
    if rvhigh < rvlow:
        t = rvlow
        rvlow = rvhigh
        rvhigh = t
    if v < rvlow :
        logging.debug("v out of range with value {0}. Should have been between {1} and {2}".format(v,rvlow,rvhigh))
        v = rvlow
    if v > rvhigh :
        logging.debug("v out of range with value {0}. Should have been between {1} and {2}".format(v,rvlow,rvhigh))
        v = rvhigh

    try:
        percent = (v - rvlow) / float((rvhigh - rvlow))
    except ZeroDivisionError:
        # Degenerate range (low == high): show an empty bar
        percent = 0

    # make image to place progress bar
    self.image = Image.new("1", size, 0)

    if style == u'square':
        draw = ImageDraw.Draw(self.image)
        if height > 2:
            # Tall bar: filled portion plus a 1px outline on the remainder
            draw.line( (0,0,0,height-1),1)
            for i in range (0,int((width-2)*percent)):
                draw.line( (i+1,0,i+1,height-1),1)
            for i in range (int((width-2)*percent), width-2):
                self.image.putpixel((i+1,0), 1)
                self.image.putpixel((i+1,height-1), 1)
            draw.line( (width-1,0,width-1,height-1),1)
        else:
            # Bar too short for an outline: just fill the progress portion
            for i in range (0,int((width)*percent)):
                draw.line( (i,0,i,height-1),1)

    self.updatesize()

    # Save parameters for update
    self.value = value
    self.rangeval = rangeval
    self.style = style
    self.type = 'progressbar'
    return self.image

# PROGRESSBAR widget function
def progressimagebar(self, maskimage, value, rangeval, direction='right'):
    # Input
    #   image (Image
{'компенсировать': 1}, {'испытание': 1}, {'надёжность': 1}, {'неизбежно': 1}, {'потребоваться': 1}, {'импульс': 2}, {'европеиским': 1}, {'россиискои': 1}, {'способствовать': 1}, {'кв': 1}, {'каковы': 1}, {'исполнение': 1}, {'федерация': 1}, {'оборудование': 3}, {'разобраться': 1}, {'аттестация': 1}, {'бесперебоиное': 1}, {'бесперебоиным': 1}, {'высоковольтныи': 1}, {'высоковольтный': 1}, {'доказывая': 1}, {'жителеи': 1}, {'заказчик': 1}, {'зарубежье': 1}, {'зэто': 1}, {'кема': 1}, {'китаиским': 1}, {'климатический': 1}, {'микроблог': 1}, {'напряжении': 1}, {'обратить': 1}, {'ориентировать': 1}, {'подрядчик': 1}, {'подстанция': 1}, {'пул': 1}, {'расширение': 1}, {'самар': 1}, {'сертификация': 1}, {'специально': 1}, {'спроектировать': 1}, {'стагнирующеи': 1}, {'сфере': 1}, {'тендер': 1}, {'трансформатор': 1}, {'тренды': 1}, {'уэтм': 1}, {'цифровизация': 2}, {'частота': 1}, {'читаите': 1}, {'экспортно': 1}, {'экспортныи': 1}, {'экспортный': 4}, {'электроаппарат': 1}, {'электрощит': 1}, {'энергетика': 1}], [{'год': 6}, {'какои': 1}, {'компания': 2}, {'мир': 3}, {'начинать': 1}, {'почему': 1}, {'рассказать': 1}, {'расти': 2}, {'рынок': 5}, {'статья': 1}, {'стоимость': 1}, {'технология': 1}, {'цена': 5}, {'человек': 1}, {'представлять': 1}, {'собои': 1}, {'стоять': 1}, {'большинство': 1}, {'возраст': 1}, {'временной': 1}, {'время': 3}, {'высокий': 1}, {'деньга': 1}, {'достаточно': 1}, {'думать': 1}, {'жизнь': 1}, {'идти': 1}, {'капитал': 1}, {'место': 1}, {'многие': 2}, {'надёжный': 1}, {'определять': 1}, {'очень': 1}, {'платить': 1}, {'покупка': 3}, {'последний': 1}, {'правило': 1}, {'продавать': 2}, {'продажеи': 1}, {'резкий': 1}, {'рекомендовать': 1}, {'роскошный': 1}, {'самые': 1}, {'свободный': 1}, {'сеичас': 3}, {'ситуация': 1}, {'слово': 2}, {'случай': 2}, {'стать': 1}, {'страна': 1}, {'хотеть': 1}, {'являться': 3}, {'актив': 1}, {'взять': 1}, {'инвестор': 1}, {'интерес': 1}, {'исключительно': 2}, {'смочь': 2}, {'упасть': 1}, {'фонд': 1}, {'дело': 1}, 
{'россиянин': 2}, {'входить': 1}, {'отметить': 2}, {'также': 2}, {'инвестиция': 5}, {'начать': 1}, {'финансовый': 1}, {'протяжение': 1}, {'ценовый': 3}, {'часть': 1}, {'кризис': 1}, {'отсутствие': 2}, {'агентство': 1}, {'директор': 2}, {'заинтересованы': 1}, {'инструмент': 3}, {'колебании': 2}, {'любои': 1}, {'маркетинг': 1}, {'речь': 1}, {'формирование': 1}, {'появление': 1}, {'камень': 4}, {'очередь': 1}, {'данном': 1}, {'эксперт': 5}, {'пора': 1}, {'увидеть': 1}, {'вкладывать': 2}, {'измениться': 1}, {'новостеи': 1}, {'перспектива': 1}, {'саит': 1}, {'вложение': 1}, {'розовый': 1}, {'ссылка': 1}, {'модный': 1}, {'редкость': 1}, {'среднем': 1}, {'целом': 1}, {'традиционный': 1}, {'валюта': 2}, {'девальвация': 1}, {'сохранение': 1}, {'неплохой': 1}, {'онлаин': 1}, {'задумываться': 1}, {'отличный': 1}, {'федеральный': 1}, {'инвестиционный': 1}, {'нестабильность': 1}, {'пояснить': 2}, {'бриллиант': 8}, {'вкладчик': 1}, {'нефтяной': 1}, {'старший': 1}, {'уточнять': 1}, {'исполнить': 1}, {'белый': 3}, {'инвестиционныи': 2}, {'сбережении': 1}, {'альтернативный': 1}, {'оставить': 1}, {'поездка': 1}, {'разбираться': 1}, {'уникальный': 1}, {'безумный': 1}, {'долгосрочнои': 1}, {'захотеть': 1}, {'пенсия': 1}, {'сохранить': 1}, {'зелёный': 1}, {'научиться': 1}, {'гарантировать': 1}, {'жёлтый': 1}, {'отличныи': 1}, {'способствовать': 1}, {'желанный': 1}, {'стабильно': 1}, {'финансово': 1}, {'мечта': 1}, {'алмаз': 2}, {'безуглый': 1}, {'вожделеннои': 1}, {'геополитическои': 1}, {'голубой': 1}, {'добыть': 1}, {'драгоценность': 2}, {'драгоценный': 3}, {'защищенныи': 1}, {'значимость': 1}, {'изделие': 2}, {'карат': 2}, {'лапидус': 1}, {'накопительный': 1}, {'непредвиденный': 1}, {'обменять': 1}, {'обналичить': 1}, {'обращаться': 1}, {'обрушение': 1}, {'отдохнуть': 1}, {'последующеи': 1}, {'правят': 1}, {'приумножающии': 1}, {'прозрачный': 1}, {'промежуток': 1}, {'разделиться': 1}, {'рапапорт': 1}, {'совершаться': 1}, {'столетии': 1}, {'существенный': 1}, {'тверды': 1}, 
{'украшении': 1}, {'уникально': 1}, {'уникальность': 1}, {'физически': 1}, {'цветной': 2}, {'ювелир': 1}, {'ювелирный': 2}], [{'весьма': 1}, {'внутренний': 1}, {'газ': 3}, {'год': 5}, {'начинать': 1}, {'оказаться': 1}, {'сказать': 3}, {'банк': 1}, {'бизнес': 2}, {'выдать': 1}, {'гражданин': 1}, {'знать': 1}, {'кредит': 1}, {'мочь': 1}, {'плохо': 1}, {'погашение': 3}, {'получить': 4}, {'представлять': 1}, {'прийтись': 1}, {'процент': 3}, {'собои': 1}, {'сумма': 3}, {'таким_образом': 1}, {'брать': 1}, {'говорить': 1}, {'граница': 2}, {'деньга': 3}, {'доступный': 1}, {'других': 1}, {'импорт': 4}, {'интересный': 1}, {'казаться': 1}, {'любимый': 1}, {'многие': 1}, {'называть': 1}, {'очень': 1}, {'различный': 1}, {'решить': 1}, {'самои': 1}, {'сеичас': 1}, {'сложиться': 1}, {'страна': 1}, {'считать': 1}, {'часто': 1}, {'чистый': 2}, {'экспорт': 7}, {'бакс': 1}, {'взять': 1}, {'другое': 1}, {'индустрия': 1}, {'коммерческий': 1}, {'млрд': 12}, {'остаться': 2}, {'составить': 4}, {'составлять': 1}, {'бесплатный': 1}, {'дело': 1}, {'доход': 2}, {'миллион': 1}, {'россиянин': 1}, {'строчка': 2}, {'перевести': 1}, {'получение': 1}, {'власть': 1}, {'другого': 1}, {'инвестиция': 2}, {'простой': 1}, {'прочее': 1}, {'тыс': 1}, {'украсть': 1}, {'приходиться': 1}, {'скорее': 1}, {'финансовый': 2}, {'дальше': 1}, {'предположить': 1}, {'часть': 1}, {'основный': 2}, {'баланс': 2}, {'включать': 1}, {'доллар': 10}, {'информация': 1}, {'любои': 1}, {'месяц': 1}, {'основа': 1}, {'понятный': 1}, {'прямой': 1}, {'статистика': 1}, {'сырьё': 1}, {'форма': 1}, {'картина': 1}, {'нефть': 3}, {'глава': 1}, {'государственный': 1}, {'потратить': 2}, {'банковский': 1}, {'миллионов_долларов': 1}, {'включая': 1}, {'итак': 1}, {'получаться': 1}, {'уйти': 2}, {'уплата': 1}, {'триллион': 1}, {'комиссия': 1}, {'операция': 4}, {'похожий': 1}, {'вложение': 1}, {'общем': 1}, {'саите': 1}, {'спросить': 1}, {'иностранный': 1}, {'платёж': 3}, {'подсчитать': 1}, {'сотня': 1}, {'среднем': 1}, {'каждыи': 1}, 
{'содержание': 1}, {'госдолг': 1}, {'долг': 2}, {'чёрный': 1}, {'самом_деле': 1}, {'жить': 2}, {'прийти': 2}, {'наглядно': 1}, {'якобы': 2}, {'выводить': 1}, {'выплата': 1}, {'допустить': 1}, {'карман': 1}, {'дивиденд': 2}, {'инвестиционный': 1}, {'обнал': 1}, {'резерв': 1}, {'избежать': 1}, {'коррупционер': 1}, {'поставить': 1}, {'оставить': 1}, {'забрать': 1}, {'старик': 1}, {'выплатить': 1}, {'нефтепродукт': 1}, {'выручка': 2}, {'бала': 1}, {'бюджета': 1}, {'бюллетень': 1}, {'ввезти': 1}, {'вертикали': 1}, {'верхнеи': 1}, {'вчерную': 1}, {'выгнать': 1}, {'выгон': 1}, {'границу': 1}, {'занижались': 1}, {'занизить': 1}, {'засвеченнои': 1}, {'захоронение': 1}, {'зполученную': 1}, {'иллюстрировать': 1}, {'инвоисы': 1}, {'красть': 1}, {'крышует': 1}, {'курировать': 1}, {'легальныи': 1}, {'летие': 1}, {'лишний': 1}, {'магницкого': 1}, {'мазде': 1}, {'мгновение': 1}, {'мзда': 1}, {'младенец': 1}, {'мораль': 1}, {'нынешнеи': 1}, {'обнала': 1}, {'ока': 1}, {'повсеместный': 1}, {'поступить': 1}, {'прое': 1}, {'промысел': 1}, {'пропуски': 1}, {'прочии': 1}, {'размах': 1}, {'ренты': 1}, {'сомнительный': 3}, {'таблица': 3}, {'фуфельные': 1}, {'чтоы': 1}, {'экспортнои': 1}], [{'год': 1}, {'заниматься': 1}, {'назад': 1}, {'невозможный': 1}, {'огромный': 1}, {'отказываться': 1}, {'подобный': 1}, {'попытаться': 1}, {'продажа': 2}, {'расти': 1}, {'рынок': 5}, {'сегодня': 1}, {'сказать': 1}, {'стоимость': 1}, {'цена': 2}, {'банк': 1}, {'вернуть': 1}, {'другои': 2}, {'знать': 2}, {'иначе': 1}, {'мочь': 3}, {'оставаться': 1}, {'получить': 3}, {'решение': 1}, {'связь': 1}, {'сми': 1}, {'таким_образом': 1}, {'увеличивать': 2}, {'уровень': 1}, {'большои': 1}, {'возможный': 2}, {'время': 2}, {'вывод': 1}, {'главный': 1}, {'говорить': 1}, {'делать': 1}, {'деньга': 3}, {'других': 1}, {'искать': 1}, {'кривой': 1}, {'менее': 1}, {'место': 1}, {'многие': 1}, {'наверняка': 1}, {'наиболее': 1}, {'общий': 1}, {'обычно': 1}, {'определять': 1}, {'последний': 1}, {'резко': 1}, {'случай': 1}, 
{'стать': 1}, {'учитывать': 2}, {'фактор': 1}, {'хотеть': 1}, {'часто': 1}, {'день': 2}, {'другие': 1}, {'неделя': 2}, {'падение': 1}, {'план': 2}, {'показатель': 1}, {'потеря': 1}, {'прибыть': 1}, {'туда': 1}, {'упасть': 1}, {'хотеться': 1}, {'час': 1}, {'вещь': 1}, {'дело': 1}, {'множество': 2}, {'прибыль': 2}, {'произойти': 1}, {'список': 1}, {'тысяча': 1}, {'биржа': 2}, {'биржах': 1}, {'биткоин': 1}, {'биткоина': 4}, {'больший': 1}, {'вопрос': 1}, {'немного': 3}, {'перевести': 1}, {'позиция': 1}, {'происходить': 2}, {'рост': 1}, {'сбросить': 1}, {'мелкий': 1}, {'назвать': 1}, {'недавно': 1}, {'необходимый': 1}, {'организовать': 1}, {'сделать': 1}, {'считанные': 1}, {'выглядеть': 2}, {'вызвать': 1}, {'образ': 1}, {'отправить': 1}, {'поддержка': 1}, {'скорее': 3}, {'дальше': 1}, {'днеи': 3}, {'дно': 1}, {'индикатор': 1}, {'кит': 3}, {'отметка': 1}, {'пара': 1}, {'основный': 1}, {'причина': 2}, {'аналитик': 1}, {'включать': 1}, {'доллар': 1}, {'информация': 1}, {'объём': 1}, {'огромныи': 1}, {'повышение': 1}, {'последующий': 1}, {'существовать': 1}, {'торговый': 1}, {'картина': 1}, {'ликвидность': 1}, {'сопротивление': 1}, {'десяток': 1}, {'долго': 1}, {'ранее': 1}, {'соблюдать': 1}, {'продолжать': 1}, {'собираться': 1}, {'миллионов_долларов': 1}, {'предполагать': 1}, {'намного': 1}, {'верить': 1}, {'забываите': 1}, {'произоидет': 1}, {'стабильный': 1}, {'помочь': 1}, {'криптовалютами': 1}, {'торговлеи': 1}, {'новость': 1}, {'операция': 1}, {'повестка': 1}, {'разорвать': 1}, {'доступ': 2}, {'куча': 1}, {'пропасть': 1}, {'сфера': 1}, {'значительно': 1}, {'издание': 1}, {'опровергнуть': 1}, {'целом': 2}, {'блокчеин': 1}, {'треидеров': 3}, {'валютои': 1}, {'всплеск': 1}, {'запас': 1}, {'плечо': 1}, {'довольно': 1}, {'разуметься': 1}, {'главныи': 1}, {'недавний': 1}, {'повод': 1}, {'быстро': 2}, {'заявка': 1}, {'пойти': 1}, {'умныи': 1}, {'скрыть': 1}, {'заключение': 1}, {'выводить': 1}, {'карман': 1}, {'короче': 1}, {'буквально': 1}, {'затем': 1}, {'крах': 1}, 
{'личность': 1}, {'подлинный': 1}, {'принести': 1}, {'ссылаться': 1}, {'доказательство': 1}, {'исполнитель': 2}, {'порекомендовать': 1}, {'наследие': 1}, {'прошлое': 1}, {'умирать': 1}, {'обстоятельство': 1}, {'реальность': 1}, {'неожиданность': 1}, {'катастрофа': 1}, {'компенсировать': 1}, {'следить': 1}, {'согласиться': 1}, {'подъём': 1}, {'ранний': 1}, {'накачать': 1}, {'сбой': 1}, {'круг': 1}, {'первоначальныи': 1}, {'торговле': 1}, {'устраивать': 1}, {'ралли': 1}, {'косвенный': 1}, {'вплоть': 1}, {'обращаться': 1}, {'поступить': 1}, {'биткоинов': 1}, {'броситься': 1}, {'впечатляющии': 1}, {'выдавить': 1}, {'глядя': 1}, {'держатель': 2}, {'динамично': 1}, {'запаниковать': 1}, {'запасу': 1}, {'инсаидеры': 1}, {'крипто': 2}, {'криптовалютных': 1}, {'манипуляция': 3}, {'неуклюже': 1}, {'низин': 1}, {'носить': 1},
fixtures in this request""" result = list(self._pyfuncitem._fixtureinfo.names_closure) result.extend(set(self._fixture_defs).difference(result)) return result @property def node(self): """ underlying collection node (depends on current request scope)""" return self._getscopeitem(self.scope) def _getnextfixturedef(self, argname): fixturedefs = self._arg2fixturedefs.get(argname, None) if fixturedefs is None: # we arrive here because of a dynamic call to # getfixturevalue(argname) usage which was naturally # not known at parsing/collection time parentid = self._pyfuncitem.parent.nodeid fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid) self._arg2fixturedefs[argname] = fixturedefs # fixturedefs list is immutable so we maintain a decreasing index index = self._arg2index.get(argname, 0) - 1 if fixturedefs is None or (-index > len(fixturedefs)): raise FixtureLookupError(argname, self) self._arg2index[argname] = index return fixturedefs[index] @property def config(self): """ the pytest config object associated with this request. """ return self._pyfuncitem.config @scopeproperty() def function(self): """ test function object if the request has a per-function scope. """ return self._pyfuncitem.obj @scopeproperty("class") def cls(self): """ class (can be None) where the test function was collected. """ clscol = self._pyfuncitem.getparent(_pytest.python.Class) if clscol: return clscol.obj @property def instance(self): """ instance (can be None) on which test function was collected. """ # unittest support hack, see _pytest.unittest.TestCaseFunction try: return self._pyfuncitem._testcase except AttributeError: function = getattr(self, "function", None) return getattr(function, "__self__", None) @scopeproperty() def module(self): """ python module object where the test function was collected. """ return self._pyfuncitem.getparent(_pytest.python.Module).obj @scopeproperty() def fspath(self): """ the file system path of the test module which collected this test. 
""" return self._pyfuncitem.fspath @property def keywords(self): """ keywords/markers dictionary for the underlying node. """ return self.node.keywords @property def session(self): """ pytest session object. """ return self._pyfuncitem.session def addfinalizer(self, finalizer): """ add finalizer/teardown function to be called after the last test within the requesting test context finished execution. """ # XXX usually this method is shadowed by fixturedef specific ones self._addfinalizer(finalizer, scope=self.scope) def _addfinalizer(self, finalizer, scope): colitem = self._getscopeitem(scope) self._pyfuncitem.session._setupstate.addfinalizer( finalizer=finalizer, colitem=colitem ) def applymarker(self, marker): """ Apply a marker to a single test function invocation. This method is useful if you don't want to have a keyword/marker on all function invocations. :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object created by a call to ``pytest.mark.NAME(...)``. """ self.node.add_marker(marker) def raiseerror(self, msg): """ raise a FixtureLookupError with the given message. """ raise self._fixturemanager.FixtureLookupError(None, self, msg) def _fillfixtures(self): item = self._pyfuncitem fixturenames = getattr(item, "fixturenames", self.fixturenames) for argname in fixturenames: if argname not in item.funcargs: item.funcargs[argname] = self.getfixturevalue(argname) def getfixturevalue(self, argname): """ Dynamically run a named fixture function. Declaring fixtures via function argument is recommended where possible. But if you can only decide whether to use another fixture at test setup time, you may use this function to retrieve it inside a fixture or test function body. """ return self._get_active_fixturedef(argname).cached_result[0] def getfuncargvalue(self, argname): """ Deprecated, use getfixturevalue. 
""" from _pytest import deprecated warnings.warn(deprecated.GETFUNCARGVALUE, stacklevel=2) return self.getfixturevalue(argname) def _get_active_fixturedef(self, argname): try: return self._fixture_defs[argname] except KeyError: try: fixturedef = self._getnextfixturedef(argname) except FixtureLookupError: if argname == "request": cached_result = (self, [0], None) scope = "function" return PseudoFixtureDef(cached_result, scope) raise # remove indent to prevent the python3 exception # from leaking into the call self._compute_fixture_value(fixturedef) self._fixture_defs[argname] = fixturedef return fixturedef def _get_fixturestack(self): current = self values = [] while 1: fixturedef = getattr(current, "_fixturedef", None) if fixturedef is None: values.reverse() return values values.append(fixturedef) current = current._parent_request def _compute_fixture_value(self, fixturedef): """ Creates a SubRequest based on "self" and calls the execute method of the given fixturedef object. This will force the FixtureDef object to throw away any previous results and compute a new fixture value, which will be stored into the FixtureDef object itself. 
:param FixtureDef fixturedef: """ # prepare a subrequest object before calling fixture function # (latter managed by fixturedef) argname = fixturedef.argname funcitem = self._pyfuncitem scope = fixturedef.scope try: param = funcitem.callspec.getparam(argname) except (AttributeError, ValueError): param = NOTSET param_index = 0 has_params = fixturedef.params is not None fixtures_not_supported = getattr(funcitem, "nofuncargs", False) if has_params and fixtures_not_supported: msg = ( "{name} does not support fixtures, maybe unittest.TestCase subclass?\n" "Node id: {nodeid}\n" "Function type: {typename}" ).format( name=funcitem.name, nodeid=funcitem.nodeid, typename=type(funcitem).__name__, ) fail(msg, pytrace=False) if has_params: frame = inspect.stack()[3] frameinfo = inspect.getframeinfo(frame[0]) source_path = frameinfo.filename source_lineno = frameinfo.lineno source_path = py.path.local(source_path) if source_path.relto(funcitem.config.rootdir): source_path = source_path.relto(funcitem.config.rootdir) msg = ( "The requested fixture has no parameter defined for test:\n" " {}\n\n" "Requested fixture '{}' defined in:\n{}" "\n\nRequested here:\n{}:{}".format( funcitem.nodeid, fixturedef.argname, getlocation(fixturedef.func, funcitem.config.rootdir), source_path, source_lineno, ) ) fail(msg, pytrace=False) else: param_index = funcitem.callspec.indices[argname] # if a parametrize invocation set a scope it will override # the static scope defined with the fixture function paramscopenum = funcitem.callspec._arg2scopenum.get(argname) if paramscopenum is not None: scope = scopes[paramscopenum] subrequest = SubRequest(self, scope, param, param_index, fixturedef) # check if a higher-level scoped fixture accesses a lower level one subrequest._check_scope(argname, self.scope, scope) # clear sys.exc_info before invoking the fixture (python bug?) 
# if it's not explicitly cleared it will leak into the call exc_clear() try: # call the fixture function fixturedef.execute(request=subrequest) finally: # if fixture function failed it might have registered finalizers self.session._setupstate.addfinalizer( functools.partial(fixturedef.finish, request=subrequest), subrequest.node, ) def _check_scope(self, argname, invoking_scope, requested_scope): if argname == "request": return if scopemismatch(invoking_scope, requested_scope): # try to report something helpful lines = self._factorytraceback() fail( "ScopeMismatch: You tried to access the %r scoped " "fixture %r with a %r scoped request object, " "involved factories\n%s" % ((requested_scope, argname, invoking_scope, "\n".join(lines))), pytrace=False, ) def _factorytraceback(self): lines = [] for fixturedef in self._get_fixturestack(): factory = fixturedef.func fs, lineno = getfslineno(factory) p = self._pyfuncitem.session.fspath.bestrelpath(fs) args = _format_args(factory) lines.append("%s:%d: def %s%s" % (p, lineno, factory.__name__, args)) return lines def _getscopeitem(self, scope): if scope == "function": # this might also be a non-function Item despite its attribute name return self._pyfuncitem if scope == "package": node = get_scope_package(self._pyfuncitem, self._fixturedef) else: node = get_scope_node(self._pyfuncitem, scope) if node is None and scope == "class": # fallback to function item itself node = self._pyfuncitem assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format( scope, self._pyfuncitem ) return node def __repr__(self): return "<FixtureRequest for %r>" % (self.node) class SubRequest(FixtureRequest): """ a sub request for handling getting a fixture from a test function/fixture. 
""" def __init__(self, request, scope, param, param_index, fixturedef): self._parent_request = request self.fixturename = fixturedef.argname if param is not NOTSET: self.param = param self.param_index = param_index self.scope = scope self._fixturedef = fixturedef self._pyfuncitem = request._pyfuncitem self._fixture_defs = request._fixture_defs self._arg2fixturedefs = request._arg2fixturedefs self._arg2index = request._arg2index self._fixturemanager = request._fixturemanager def __repr__(self): return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem) def addfinalizer(self, finalizer): self._fixturedef.addfinalizer(finalizer) class ScopeMismatchError(Exception): """ A fixture function tries to use a different fixture function which which has a lower scope (e.g. a Session one calls a function one) """ scopes = "session package module class function".split() scopenum_function = scopes.index("function") def scopemismatch(currentscope, newscope): return scopes.index(newscope) > scopes.index(currentscope) def scope2index(scope, descr, where=None): """Look up the index of ``scope`` and raise a descriptive value error if not defined. """ try: return scopes.index(scope) except ValueError: fail( "{} {}got an unexpected scope value '{}'".format( descr, "from {} ".format(where) if where else "", scope ), pytrace=False, ) class FixtureLookupError(LookupError): """ could not return a requested Fixture (missing or invalid). 
""" def __init__(self, argname, request, msg=None): self.argname = argname self.request = request self.fixturestack = request._get_fixturestack() self.msg = msg def formatrepr(self): tblines = [] addline = tblines.append stack = [self.request._pyfuncitem.obj] stack.extend(map(lambda x: x.func, self.fixturestack)) msg = self.msg if msg is not None: # the last fixture raise an error, let's present # it at the requesting side stack = stack[:-1] for function in stack: fspath, lineno = getfslineno(function) try: lines, _ = inspect.getsourcelines(get_real_func(function)) except (IOError, IndexError, TypeError): error_msg = "file %s, line %s: source code not available" addline(error_msg % (fspath, lineno + 1)) else: addline("file %s, line %s" % (fspath, lineno + 1)) for i, line in enumerate(lines): line = line.rstrip() addline(" " + line) if line.lstrip().startswith("def"): break if msg is None: fm = self.request._fixturemanager available = set() parentid = self.request._pyfuncitem.parent.nodeid for name, fixturedefs in fm._arg2fixturedefs.items(): faclist = list(fm._matchfactories(fixturedefs, parentid)) if faclist: available.add(name) if self.argname in available: msg = " recursive dependency involving fixture
): histOutFile = os.path.join( Ddata, 'hist', scenDir, AddFileSfx( ReplaceFileExt( os.path.basename( outFile ), '.tsv' ), snpStat, replicaCondSfx, snpCondSfx, scenSfx, sfx ) ) rule = pr.addInvokeRule( invokeFn = histogramSnpStatistic, invokeArgs = dict( outFile = histOutFile, **Dict( 'Ddata thinSfx replicaTables replicaCond snpTables snpCond ' 'snpStat nreplicas binSize binShift scenDir scenSfx sfx' ) ), name = name, comment = 'Compute distribution of ' + snpStat + ' for SNPs matching ' + snpCond + ' in replicas matching ' + replicaCond ) histFiles.append( histOutFile ) totaledHistFile = os.path.join( Ddata, 'hist', AddFileSfx( ReplaceFileExt( os.path.basename( outFile ), '.tsv' ), snpCondSfx, replicaCondSfx, sfx ) ) totaledHistFiles.append( totaledHistFile ) totaledLabel = '' if replicaCondSfx: totaledLabel += replicaCondSfx + ' replicas' + ( (' (' + replicaCond + ') ') \ if replicaCond != 'True' else '' ) if snpCondSfx: totaledLabel += snpCondSfx + ' SNPs' + ( (' (' + snpCond + ') ') \ if snpCond != 'True' else '' ) totaledLabels.append( totaledLabel ) pr.addInvokeRule( invokeFn = AddUpHistograms, invokeArgs = dict( histFiles = histFiles, outFile = totaledHistFile ), mediumRuleNameSfx = ( sfx, snpStat, replicaCondSfx, snpCondSfx ), name = 'AddUpSnpHists', fileDescrs = { 0: ( 'Distribution of <b>' + snpStat + '</b> among ' + ( 'all SNPs' if snpCond == 'True' else ' snps matching <em>' + snpCond + '</em>' ) + ' in ' + ( 'all replicas' if replicaCond == 'True' else 'replicas matching <em>' + replicaCond + '</em>' ) + ' in ' + ( 'all scenarios' if scenCond == 'True' else 'scenarios matching <em>' + scenCond + '</em>' ), ( ( 'count', 'Number of SNPs with ' + snpStat + ' in given bin' ),) ) } ) if not title: title = 'Histogram of ' + snpStat + '\n' if scenCond != 'True': title += ' scenCond: ' + scenCond if any( replicaCond != 'True' for replicaCond in replicaConds ): title += ' replicaConds: ' + ', '.join(replicaCondsSfxs) if any( snpCond != 'True' for snpCond in 
snpConds ): title += ' snpConds: ' + ', '.join(snpCondsSfxs) title = titlePrefix + title if not ylabel: ylabel = ('#' if not normed else 'fraction') + ' of snps' if not xlabel: xlabel = snpStat pr.addInvokeRule( invokeFn = GraphHistograms, mediumRuleNameSfx = (snpStat,) + tuple(replicaCondsSfxs) + tuple(snpCondsSfxs), name = 'GraphSnpHists', invokeArgs = dict( histFiles = totaledHistFiles, labels = totaledLabels, **Dict( 'xlabel ylabel title xbound ybound coarsenBy log outFile ' 'cumulative normed ticksCoarsen colors' ) ), attrs = Dict( 'snpStat replicaConds snpConds scenCond subplots_adjust' ) ) def DefineRulesTo_histogramReplicaStatistic( pr, Ddata, outFile, replicaStat, binSize, scenCond = 'True', replicaTables = None, sfx = '', scen2sfxs = lambda scen: '', allScens = tuple( GetScenarios() ), nreplicas = 100, thinSfx = '', replicaConds = 'True', replicaCondsSfxs = '', title = '', titlePrefix = '', xlabel = '', ylabel = '', xbound = None, ybound = None, log = False, coarsenBy = None, ticksCoarsen = 1, cumulative = False, normed = False, cumulativeUpTo = 0.99, subplots_adjust = {}, name = None, nameSfx = '' ): """Define rules to plot the distribution of a specified per-replica statistic for some subsets of replicas in some subset of scenarios. Params: pr - the PipeRun object to which the rules should be added Ddata - the root folder of the genetic data in simulations format outFile - the filename to which the histogram plot will be written replicaTables - names of tables containing per-replica values. For each such table T, there must be a file of the form os.path.join( Ddata, replicastats, scenario.scenDir(), T + '.tsv' ) giving some values for each replica in the scenario. replicaStat - a Python expression in which the names in replicaTables may appear as variables, and refer to a named tuple representing the replica's row in the corresponding replicaTable. Notes: - for histogramming should not need to load it all into memory. 
can do a pre-pass to just get the range of values, define the bins, then do a second pass to count what goes in what bin. could also add bins as we go. so, really just need to know bin size, and then can do all this with one pass. can also, later, make this automatically parallelized. """ if not os.path.dirname( outFile ): outFile = os.path.join( Ddata, outFile ) scenCondExpr = compile_expr( scenCond ) ourScens = [ scen for scen in allScens if eval( scenCondExpr, globals(), ScenAttrs( scen ) ) ] if callable( scen2sfxs ): scen2sfxs = dict( ( scen, scen2sfxs( scen ) ) for scen in ourScens ) replicaConds = MakeSeq( replicaConds ) replicaCondsSfxs = MakeSeq( replicaCondsSfxs ) totaledHistFiles = [] totaledLabels = [] for replicaCond, replicaCondSfx in zip( replicaConds, replicaCondsSfxs ): totaledHistFile = os.path.join( Ddata, 'hist', ReplaceFileExt( os.path.basename( outFile ), '.tsv' ) ) totaledLabels.append( replicaCondSfx + ': ' + replicaCond ) r = pr.addInvokeRule( invokeFn = histogramReplicaStatistic, invokeArgs = Dict( 'Ddata thinSfx replicaTables replicaCond replicaStat nreplicas ' 'binSize scenCond scen2sfxs allScens nameSfx sfx replicaCondSfx', outFile = totaledHistFile ), mediumRuleNameSfx = ( replicaStat, replicaCondSfx, sfx ), fileDescrs = { 0: ( 'Distribution of <b>' + replicaStat + '</b> among ' + ( 'all replicas' if replicaCond == 'True' else 'replicas matching <em>' + replicaCond + '</em>' ) + ' in ' + ( 'all scenarios' if scenCond == 'True' else 'scenarios matching <em>' + scenCond + '</em>' ), ( ( 'count', 'Number of replicas with ' + replicaStat + ' in given bin' ), )) } ) totaledHistFiles.append( r.creates[0] ) if not title: if scenCond != 'True': title += ' scenCond: ' + scenCond if len( replicaConds ) == 1 and replicaConds[0] != 'True': title += ' replicaCond: ' + replicaConds[0] title = titlePrefix + title if not ylabel: ylabel = ('#' if not normed else 'fraction') + ' of replicas' if not xlabel: xlabel = replicaStat pr.addInvokeRule( invokeFn 
= GraphHistograms, invokeArgs = dict( histFiles = totaledHistFiles, labels = totaledLabels, **Dict( 'xlabel ylabel title xbound ybound coarsenBy log outFile ' 'sfx ticksCoarsen cumulative normed cumulativeUpTo' ) ), name = 'GraphReplicaHists' + Sfx( nameSfx ), mediumRuleNameSfx = ( replicaStat, sfx ) + tuple( replicaConds ), attrs = Dict( 'replicaStat sfx subplots_adjust' ) ) return totaledHistFiles def identifyReplicasMeetingConds( Ddata, scenario, replicaTables, replicaConds, condsFileFN, nreplicas, thinSfx = '', getio = None ): """Given a list of named replica conditions, determine for each replica which conditions it meets, and write out the result in an easy-to-access format. Input params: replicaConds - sequence of pairs of the form ( condName, cond ) -- for example, ( ( 'hi', 'replicaStats.causalAlleleFreq >= .5' ), ( 'lo', 'replicaStats.causalAlleleFreq < .5' ) ) """ replicaTables = MakeSeq( replicaTables ) replicaTableFiles = [ os.path.join( Ddata, 'replicastats' + thinSfx, scenario.scenDir(), replicaTable + ( '.tsv' if not os.path.splitext( replicaTable )[1] else '' ) ) for replicaTable in replicaTables ] if not os.path.dirname( condsFileFN ): condsFileFN = os.path.join( Ddata, 'replicastats' + thinSfx, scenario.scenDir(), condsFileFN ) if getio: return dict( depends_on = replicaTableFiles, creates = condsFileFN, mediumRuleNameSfx = scenario.scenDir(), attrs = dict( piperun_short = True, condNames = ', '.join( map( operator.itemgetter( 0 ), replicaConds ) ) ) ) replicaTableVals = [ DotData( SVPath = f ) for f in replicaTableFiles ] assert all([ len( replicaTableVal ) == nreplicas for replicaTableVal in replicaTableVals ]) matchingReplicas = [] for replicaCond in map( operator.itemgetter( 1 ), replicaConds ): replicaCondExpr = compile_expr( replicaCond ) replicasToUse = [ int( eval( replicaCondExpr, globals(), dict( zip( replicaTables, replicaTableRows ) ) ) ) for replicaTableRows in izip( *replicaTableVals ) ] matchingReplicas.append( replicasToUse ) 
Records = [] condNames = tuple( map( operator.itemgetter( 0 ), replicaConds ) ) for replicaNum, condResults in enumerate( izip( *matchingReplicas ) ): Records.append( ( replicaNum, ','.join( replicaCondName for condNum, replicaCondName in enumerate( condNames ) if condResults[ condNum ] ) ) + condResults ) IDotData( names
smaller than the first, the LSTs are treated as having phase-wrapped around LST = 2*pi = 0, and the LSTs kept on the object will run from the larger value, through 0, and end at the smaller value. polarizations : array_like of int, optional The polarizations numbers to include when reading data into the object, each value passed here should exist in the polarization_array. Ignored if read_data is False. blt_inds : array_like of int, optional The baseline-time indices to include when reading data into the object. This is not commonly used. Ignored if read_data is False. keep_all_metadata : bool Option to keep all the metadata associated with antennas, even those that do not have data associated with them after the select option. read_data : bool Read in the visibility and flag data. If set to false, only the basic header info and metadata will be read in. Setting read_data to False results in an incompletely defined object (check will not pass). data_array_dtype : numpy dtype Datatype to store the output data_array as. Must be either np.complex64 (single-precision real and imaginary) or np.complex128 (double- precision real and imaginary). Only used if the datatype of the visibility data on-disk is not 'c8' or 'c16'. multidim_index : bool [Only for HDF5] If True, attempt to index the HDF5 dataset simultaneously along all data axes. Otherwise index one axis at-a-time. This only works if data selection is sliceable along all but one axis. If indices are not well-matched to data chunks, this can be slow. background_lsts : bool When set to True, the lst_array is calculated in a background thread. run_check : bool Option to check for the existence and proper shapes of parameters after after reading in the file (the default is True, meaning the check will be run). Ignored if read_data is False. check_extra : bool Option to check optional parameters as well as required ones (the default is True, meaning the optional parameters will be checked). 
Ignored if read_data is False. run_check_acceptability : bool Option to check acceptable range of the values of parameters after reading in the file (the default is True, meaning the acceptable range check will be done). Ignored if read_data is False. strict_uvw_antpos_check : bool Option to raise an error rather than a warning if the check that uvws match antenna positions does not pass. fix_old_proj : bool Applies a fix to uvw-coordinates and phasing, assuming that the old `phase` method was used prior to writing the data, which had errors of the order of one part in 1e4 - 1e5. See the phasing memo for more details. Default is to apply the correction if the attributes `phase_center_app_ra` and `phase_center_app_dec` are missing (as they were introduced alongside the new phasing method). fix_use_ant_pos : bool If setting `fix_old_proj` to True, use the antenna positions to derive the correct uvw-coordinates rather than using the baseline vectors. Default is True. Raises ------ IOError If filename doesn't exist. ValueError If the data_array_dtype is not a complex dtype. If incompatible select keywords are set (e.g. `ant_str` with other antenna selectors, `times` and `time_range`) or select keywords exclude all data or if keywords are set to the wrong type. """ from . import uvh5 if isinstance(filename, (list, tuple, np.ndarray)): raise ValueError( "Reading multiple files from class specific " "read functions is no longer supported. " "Use the generic `uvdata.read` function instead." 
) uvh5_obj = uvh5.UVH5() uvh5_obj.read_uvh5( filename, antenna_nums=antenna_nums, antenna_names=antenna_names, ant_str=ant_str, bls=bls, frequencies=frequencies, freq_chans=freq_chans, times=times, time_range=time_range, lsts=lsts, lst_range=lst_range, polarizations=polarizations, blt_inds=blt_inds, data_array_dtype=data_array_dtype, keep_all_metadata=keep_all_metadata, read_data=read_data, multidim_index=multidim_index, background_lsts=background_lsts, run_check=run_check, check_extra=check_extra, run_check_acceptability=run_check_acceptability, strict_uvw_antpos_check=strict_uvw_antpos_check, fix_old_proj=fix_old_proj, fix_use_ant_pos=fix_use_ant_pos, ) self._convert_from_filetype(uvh5_obj) del uvh5_obj def read( self, filename, axis=None, file_type=None, allow_rephase=True, phase_center_radec=None, unphase_to_drift=False, phase_frame="icrs", phase_epoch=None, orig_phase_frame=None, phase_use_ant_pos=True, antenna_nums=None, antenna_names=None, ant_str=None, bls=None, frequencies=None, freq_chans=None, times=None, polarizations=None, blt_inds=None, time_range=None, keep_all_metadata=True, read_data=True, phase_type=None, correct_lat_lon=True, use_model=False, data_column="DATA", pol_order="AIPS", data_array_dtype=np.complex128, nsample_array_dtype=np.float32, use_aoflagger_flags=None, use_cotter_flags=None, remove_dig_gains=True, remove_coarse_band=True, correct_cable_len=False, correct_van_vleck=False, cheby_approx=True, flag_small_auto_ants=True, flag_small_sig_ants=None, propagate_coarse_flags=True, flag_init=True, edge_width=80e3, start_flag="goodtime", end_flag=0.0, flag_dc_offset=True, remove_flagged_ants=True, phase_to_pointing_center=False, skip_bad_files=False, multidim_index=False, background_lsts=True, run_check=True, check_extra=True, run_check_acceptability=True, strict_uvw_antpos_check=False, isource=None, irec=None, isb=None, corrchunk=None, pseudo_cont=False, lsts=None, lst_range=None, calc_lst=True, fix_old_proj=None, fix_use_ant_pos=True, 
make_multi_phase=False, ignore_name=False, ): """ Read a generic file into a UVData object. Parameters ---------- filename : str or array_like of str The file(s) or list(s) (or array(s)) of files to read from. file_type : str One of ['uvfits', 'miriad', 'fhd', 'ms', 'uvh5'] or None. If None, the code attempts to guess what the file type is. For miriad and ms types, this is based on the standard directory structure. For FHD, uvfits and uvh5 files it's based on file extensions (FHD: .sav, .txt; uvfits: .uvfits; uvh5: .uvh5). Note that if a list of datasets is passed, the file type is determined from the first dataset. axis : str Axis to concatenate files along. This enables fast concatenation along the specified axis without the normal checking that all other metadata agrees. This method does not guarantee correct resulting objects. Please see the docstring for fast_concat for details. Allowed values are: 'blt', 'freq', 'polarization'. Only used if multiple files are passed. allow_rephase : bool Allow rephasing of phased file data so that data from files with different phasing can be combined. phase_center_radec : array_like of float The phase center to phase the files to before adding the objects in radians (in the ICRS frame). If set to None and multiple files are read with different phase centers, the phase center of the first file will be used. unphase_to_drift : bool Unphase the data from the files before combining them. phase_frame : str The astropy frame to phase to. Either 'icrs' or 'gcrs'. 'gcrs' accounts for precession & nutation, 'icrs' accounts for precession, nutation & abberation. Only used if `phase_center_radec` is set. orig_phase_frame : str The original phase frame of the data (if it is already phased). Used for unphasing, only if `unphase_to_drift` or `phase_center_radec` are set. Defaults to using the 'phase_center_frame' attribute or 'icrs' if that attribute is None. 
phase_use_ant_pos : bool If True, calculate the phased or unphased uvws directly from the antenna positions rather than from the existing uvws. Only used if `unphase_to_drift` or `phase_center_radec` are set. antenna_nums : array_like of int, optional The antennas numbers to include when reading data into the object (antenna positions and names for the removed antennas will be retained unless `keep_all_metadata` is False). This cannot be provided if `antenna_names` is also provided. antenna_names : array_like of str, optional The antennas names to include when reading data into the object (antenna positions and names for the removed antennas will be retained unless `keep_all_metadata` is False). This cannot be provided if `antenna_nums` is also provided. bls : list of tuple, optional A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines to include when reading data into the object. For length-2 tuples, the ordering of the numbers within the tuple does not matter. For length-3 tuples, the polarization string is in the order of the two antennas. If length-3 tuples are provided, `polarizations` must be None. ant_str : str, optional A string containing information about what antenna numbers and polarizations to include when reading data into the object. Can be 'auto', 'cross', 'all', or combinations of antenna numbers and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more examples of valid strings and the behavior of different forms for ant_str. If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will be kept for both baselines (1, 2) and (2, 3) to return a valid pyuvdata object. An ant_str cannot be passed in addition to any of `antenna_nums`, `antenna_names`, `bls` args or the `polarizations` parameters, if it is a ValueError will be raised. 
frequencies : array_like of float, optional The frequencies to include when reading data into the object, each value passed here should
# NOTE(review): this whole region is whitespace-mangled extraction residue --
# the original file's newlines were collapsed into spaces, so each physical
# line below holds many logical statements. Code is kept byte-identical;
# only these review comments are added.
#
# Content: unit tests of a reliability-growth model ("TestGrowth"). This line
# carries the tail of a previous assess_plan_feasibility assertion, then
# test16c -- assess_plan_feasibility should flag (fifth result position) the
# test phase configured with zero test units.
0.0, -1, -1]) self.assertEqual(self.DUT.lst_p_tpu, [200.0, 300.0, 625.0, 500.0, 1000.0]) self.assertEqual(self.DUT.lst_p_tpupw, [19.17808219178082, 35.0, 56.81818181818182, 57.37704918032787, 58.333333333333336]) @attr(all=True, unit=True) def test16c_assess_plan_feasibility_no_test_units(self): """ (TestGrowth) assess_plan_feasibility should return a list of integers with the fifth position equal to the test phase with zero test units """ self.DUT.lst_p_ms = [0.95, 0.9, 0.90, 0.8, 0.75] self.DUT.lst_p_fef = [0.7, 0.7, 0.7, 0.7, 0.7] self.DUT.lst_p_prob = [0.95, 0.975, 0.975, 0.99, 0.99] self.DUT.lst_p_mtbfa = [50.0, 66.0, 81.5, 95.0, 107.5] self.DUT.lst_p_mtbfi = [45.0, 60.0, 78.0, 90.0, 105.0] self.DUT.lst_p_start_date = [735599, 735689, 735764, 735856, 735964] self.DUT.lst_p_end_date = [735672, 735749, 735841, 735917, 736084] self.DUT.lst_p_test_time = [1000.0, 1500.0, 2500.0, 2000.0, 3000.0] self.DUT.lst_p_n_test_units = [5, 5, 0, 4, 3] self.DUT.lst_p_tpu = [0.0, 0.0, 0.0, 0.0, 0.0] self.DUT.lst_p_tpupw = [0.0, 0.0, 0.0, 0.0, 0.0] self.DUT.mtbfg = 110.0 self.DUT.mtbfgp = 150.0 self.DUT.avg_growth = 0.23 self.DUT.avg_fef = 0.7 self.DUT.n_phases = 5 self.assertEqual(self.DUT.assess_plan_feasibility(), [0.4090909090909091, 0.7333333333333333, 2, -1]) self.assertEqual(self.DUT.lst_p_tpu, [200.0, 300.0, 0.0, 500.0, 1000.0]) self.assertEqual(self.DUT.lst_p_tpupw, [19.17808219178082, 35.0, 0.0, 57.37704918032787, 58.333333333333336]) @attr(all=True, unit=True) def test16d_assess_plan_feasibility_same_dates(self): """ (TestGrowth) assess_plan_feasibility should return a list of integers with the the sixth position equal to the test phase when the phase start and end dates are the same """ self.DUT.lst_p_ms = [0.95, 0.9, 0.90, 0.8, 0.75] self.DUT.lst_p_fef = [0.7, 0.7, 0.7, 0.7, 0.7] self.DUT.lst_p_prob = [0.95, 0.975, 0.975, 0.99, 0.99] self.DUT.lst_p_mtbfa = [50.0, 66.0, 81.5, 95.0, 107.5] self.DUT.lst_p_mtbfi = [45.0, 60.0, 78.0, 90.0, 105.0]
# test16d (continued) -- assess_plan_feasibility should flag the phase whose
# start and end dates are identical (last entries of both date lists below
# are both 735964); then test18 -- Crow-AMSAA (NHPP) alpha/beta MLEs from
# exact failure times (example data credited in-line to reliawiki.org).
self.DUT.lst_p_start_date = [735599, 735689, 735764, 735856, 735964] self.DUT.lst_p_end_date = [735672, 735749, 735841, 735917, 735964] self.DUT.lst_p_test_time = [1000.0, 1500.0, 2500.0, 2000.0, 3000.0] self.DUT.lst_p_n_test_units = [5, 5, 4, 4, 3] self.DUT.lst_p_tpu = [0.0, 0.0, 0.0, 0.0, 0.0] self.DUT.lst_p_tpupw = [0.0, 0.0, 0.0, 0.0, 0.0] self.DUT.mtbfg = 110.0 self.DUT.mtbfgp = 150.0 self.DUT.avg_growth = 0.23 self.DUT.avg_fef = 0.7 self.DUT.n_phases = 5 self.assertEqual(self.DUT.assess_plan_feasibility(), [0.4090909090909091, 0.7333333333333333, -1, 4]) self.assertEqual(self.DUT.lst_p_tpu, [200.0, 300.0, 625.0, 500.0, 1000.0]) self.assertEqual(self.DUT.lst_p_tpupw, [19.17808219178082, 35.0, 56.81818181818182, 57.37704918032787, 0.0]) @attr(all=True, unit=True) def test18_estimate_crow_amsaa_exact(self): """ (TestGrowth) estimate_crow_amsaa should return False using exact failure times """ # For example data, see http://www.reliawiki.org/index.php/Crow-AMSAA_%28NHPP%29#Example_-_Parameter_Estimation self.DUT.dic_test_data = {0: [1, 719163, 0.0, 2.7, 1], 1: [2, 719163, 0.0, 10.3, 1], 2: [3, 719163, 0.0, 12.5, 1], 3: [4, 719163, 0.0, 30.6, 1], 4: [5, 719163, 0.0, 57.0, 1], 5: [6, 719163, 0.0, 61.3, 1], 6: [7, 719163, 0.0, 80.0, 1], 7: [8, 719163, 0.0, 109.5, 1], 8: [9, 719163, 0.0, 125.0, 1], 9: [10, 719163, 0.0, 128.6, 1], 10: [11, 719163, 0.0, 143.8, 1], 11: [12, 719163, 0.0, 167.9, 1], 12: [13, 719163, 0.0, 229.2, 1], 13: [14, 719163, 0.0, 296.7, 1], 14: [15, 719163, 0.0, 320.6, 1], 15: [16, 719163, 0.0, 328.2, 1], 16: [17, 719163, 0.0, 366.2, 1], 17: [18, 719163, 0.0, 396.7, 1], 18: [19, 719163, 0.0, 421.1, 1], 19: [20, 719163, 0.0, 438.2, 1], 20: [21, 719163, 0.0, 501.2, 1], 21: [22, 719163, 0.0, 620.0, 1]} self.DUT.grouped = False self.assertFalse(self.DUT.estimate_crow_amsaa()) self.assertEqual(self.DUT.alpha_hat, [0.32279203293521613, 0.42394221488057504, 0.52924893703956533]) self.assertEqual(self.DUT.beta_hat, [0.51077399351295227, 0.6142103999317297,
# test18a -- Crow-AMSAA estimation from *grouped* failure counts
# (self.DUT.grouped = True); test19 -- cumulative mean from the fitted
# alpha_hat/beta_hat three-element [lower, point, upper] estimates.
0.8474287960726461]) @attr(all=True, unit=True) def test18a_estimate_crow_amsaa_grouped(self): """ (TestGrowth) estimate_crow_amsaa should return False using grouped failure times """ # For example data, see http://www.reliawiki.org/index.php/Crow-AMSAA_%28NHPP%29#Example_-_Parameter_Estimation self.DUT.dic_test_data = {0: [1, 719163, 0.0, 62.0, 12], 1: [2, 719163, 0.0, 100.0, 6], 2: [3, 719163, 62.0, 187.0, 15], 3: [4, 719163, 187.0, 210.0, 3], 4: [5, 719163, 210.0, 350.0, 18], 5: [6, 719163, 350.0, 500.0, 16]} self.DUT.grouped = True self.assertFalse(self.DUT.estimate_crow_amsaa()) self.assertEqual(self.DUT.alpha_hat, [0.38537715881590562, 0.44585433767531873, 0.50770467031237509]) self.assertEqual(self.DUT.beta_hat, [0.7231246075396317, 0.81360853965677293, 0.95456866230721715]) @attr(all=True, unit=True) def test19_calculate_crow_amsaa_mean(self): """ (TestGrowth) calculate_crow_amsaa_mean should return False on success """ self.DUT.dic_test_data = {0: [1, 719163, 0.0, 2.7, 1], 1: [2, 719163, 0.0, 10.3, 1], 2: [3, 719163, 0.0, 12.5, 1], 3: [4, 719163, 0.0, 30.6, 1], 4: [5, 719163, 0.0, 57.0, 1], 5: [6, 719163, 0.0, 61.3, 1], 6: [7, 719163, 0.0, 80.0, 1], 7: [8, 719163, 0.0, 109.5, 1], 8: [9, 719163, 0.0, 125.0, 1], 9: [10, 719163, 0.0, 128.6, 1], 10: [11, 719163, 0.0, 143.8, 1], 11: [12, 719163, 0.0, 167.9, 1], 12: [13, 719163, 0.0, 229.2, 1], 13: [14, 719163, 0.0, 296.7, 1], 14: [15, 719163, 0.0, 320.6, 1], 15: [16, 719163, 0.0, 328.2, 1], 16: [17, 719163, 0.0, 366.2, 1], 17: [18, 719163, 0.0, 396.7, 1], 18: [19, 719163, 0.0, 421.1, 1], 19: [20, 719163, 0.0, 438.2, 1], 20: [21, 719163, 0.0, 501.2, 1], 21: [22, 719163, 0.0, 620.0, 1]} self.DUT.alpha_hat = [0.32279203293521613, 0.42394221488057504, 0.52924893703956533] self.DUT.beta_hat = [0.51077399351295227, 0.6142103999317297, 0.8474287960726461] self.assertFalse(self.DUT.calculate_crow_amsaa_mean()) self.assertEqual(self.DUT.cum_mean[0], [1.2984255368000672, 3.4602620492308573, 20.219964361430275])
# test19 assertions continue (cum_mean / instantaneous_mean); then test20 --
# Cramer-von Mises goodness-of-fit statistic for Type I (time-terminated)
# data, passing t_star=650.0 and type2=False.
self.assertEqual(self.DUT.cum_mean[3], [4.8432788731340963, 8.828139974276127, 16.120046994855421]) self.assertAlmostEqual(self.DUT.instantaneous_mean[0][1], 5.6336754) @attr(all=True, unit=True) def test20_calculate_cramer_vonmises_typei(self): """ (TestGrowth) calculate_cramer_vonmises should return False on success with Type I (time terminated) data """ self.DUT.dic_test_data = {0: [1, 719163, 0.0, 2.7, 1], 1: [2, 719163, 0.0, 10.3, 1], 2: [3, 719163, 0.0, 12.5, 1], 3: [4, 719163, 0.0, 30.6, 1], 4: [5, 719163, 0.0, 57.0, 1], 5: [6, 719163, 0.0, 61.3, 1], 6: [7, 719163, 0.0, 80.0, 1], 7: [8, 719163, 0.0, 109.5, 1], 8: [9, 719163, 0.0, 125.0, 1], 9: [10, 719163, 0.0, 128.6, 1], 10: [11, 719163, 0.0, 143.8, 1], 11: [12, 719163, 0.0, 167.9, 1], 12: [13, 719163, 0.0, 229.2, 1], 13: [14, 719163, 0.0, 296.7, 1], 14: [15, 719163, 0.0, 320.6, 1], 15: [16, 719163, 0.0, 328.2, 1], 16: [17, 719163, 0.0, 366.2, 1], 17: [18, 719163, 0.0, 396.7, 1], 18: [19, 719163, 0.0, 421.1, 1], 19: [20, 719163, 0.0, 438.2, 1], 20: [21, 719163, 0.0, 501.2, 1], 21: [22, 719163, 0.0, 620.0, 1]} self.DUT.beta_hat = [0.51077399351295227, 0.6142103999317297, 0.8474287960726461] self.assertFalse(self.DUT.calculate_cramer_vonmises(t_star=650.0, type2=False)) self.assertAlmostEqual(self.DUT.cramer_vonmises, 0.03383013) @attr(all=True, unit=True) def test20a_calculate_cramer_vonmises_typeii(self): """ (TestGrowth) calculate_cramer_vonmises should return False on success with Type II (failure terminated) data """ self.DUT.dic_test_data = {0: [1, 719163, 0.0, 2.7, 1], 1: [2, 719163, 0.0, 10.3, 1], 2: [3, 719163, 0.0, 12.5, 1], 3: [4, 719163, 0.0, 30.6, 1], 4: [5, 719163, 0.0, 57.0, 1], 5: [6, 719163, 0.0, 61.3, 1], 6: [7, 719163, 0.0, 80.0, 1], 7: [8, 719163, 0.0, 109.5, 1], 8: [9, 719163, 0.0, 125.0, 1], 9: [10, 719163, 0.0, 128.6, 1], 10: [11, 719163, 0.0, 143.8, 1], 11: [12, 719163, 0.0, 167.9, 1], 12: [13, 719163, 0.0, 229.2, 1], 13: [14, 719163, 0.0, 296.7, 1], 14: [15, 719163, 0.0, 320.6, 1],
# test20a fixture continues (Type II, default arguments); then test21 --
# chi-square GoF with exact, failure-terminated data (ttt=620.0,
# confidence=0.90, test_termination_time=0.0) checking both the statistic
# and the two-sided critical values.
15: [16, 719163, 0.0, 328.2, 1], 16: [17, 719163, 0.0, 366.2, 1], 17: [18, 719163, 0.0, 396.7, 1], 18: [19, 719163, 0.0, 421.1, 1], 19: [20, 719163, 0.0, 438.2, 1], 20: [21, 719163, 0.0, 501.2, 1], 21: [22, 719163, 0.0, 620.0, 1]} self.DUT.beta_hat = [0.51077399351295227, 0.6142103999317297, 0.8474287960726461] self.assertFalse(self.DUT.calculate_cramer_vonmises()) self.assertAlmostEqual(self.DUT.cramer_vonmises, 0.04488069) @attr(all=True, unit=True) def test21_calculate_chi_square_exact_failure_terminated(self): """ (TestGrowth) calculate_chi_square should return False on success with exact data """ self.DUT.dic_test_data = {0: [1, 719163, 0.0, 2.7, 1], 1: [2, 719163, 0.0, 10.3, 1], 2: [3, 719163, 0.0, 12.5, 1], 3: [4, 719163, 0.0, 30.6, 1], 4: [5, 719163, 0.0, 57.0, 1], 5: [6, 719163, 0.0, 61.3, 1], 6: [7, 719163, 0.0, 80.0, 1], 7: [8, 719163, 0.0, 109.5, 1], 8: [9, 719163, 0.0, 125.0, 1], 9: [10, 719163, 0.0, 128.6, 1], 10: [11, 719163, 0.0, 143.8, 1], 11: [12, 719163, 0.0, 167.9, 1], 12: [13, 719163, 0.0, 229.2, 1], 13: [14, 719163, 0.0, 296.7, 1], 14: [15, 719163, 0.0, 320.6, 1], 15: [16, 719163, 0.0, 328.2, 1], 16: [17, 719163, 0.0, 366.2, 1], 17: [18, 719163, 0.0, 396.7, 1], 18: [19, 719163, 0.0, 421.1, 1], 19: [20, 719163, 0.0, 438.2, 1], 20: [21, 719163, 0.0, 501.2, 1], 21: [22, 719163, 0.0, 620.0, 1]} self.DUT.ttt = 620.0 self.DUT.beta_hat = [0.51077399351295227, 0.6142103999317297, 0.8474287960726461] self.DUT.grouped = False self.DUT.confidence = 0.90 self.DUT.test_termination_time = 0.0 self.assertFalse(self.DUT.calculate_chi_square()) self.assertAlmostEqual(self.DUT.chi_square, 71.6366900) self.assertAlmostEqual(self.DUT.chi2_critical_value[0], 58.1240377) self.assertAlmostEqual(self.DUT.chi2_critical_value[1], 28.1440495) @attr(all=True, unit=True) def test21a_calculate_chi_square_exact_time_terminated(self): """ (TestGrowth) calculate_chi_square should return False on success with exact data """ self.DUT.dic_test_data = {0: [1, 719163, 0.0, 2.7,
# test21a -- chi-square with exact, time-terminated data. NOTE(review): the
# fixture dict below is cut mid-literal; the rest of this method is not part
# of this region.
1], 1: [2, 719163, 0.0, 10.3, 1], 2: [3, 719163, 0.0, 12.5, 1], 3: [4, 719163, 0.0, 30.6, 1], 4: [5, 719163, 0.0, 57.0, 1], 5: [6, 719163, 0.0, 61.3, 1], 6: [7, 719163, 0.0, 80.0, 1], 7: [8, 719163, 0.0, 109.5, 1], 8: [9, 719163, 0.0, 125.0, 1], 9: [10, 719163, 0.0, 128.6, 1], 10: [11, 719163, 0.0, 143.8,
# NOTE(review): whitespace-mangled region (original newlines collapsed into
# spaces); code kept byte-identical, review comments only. Content: PyQt
# methods of a Sumo Logic content manager operating on two side-by-side
# content lists ("From" / "To"). This line holds the tail of a
# find/replace-copy routine -- it shows a findReplaceCopyDialog, applies each
# from->to replacement to the exported content's JSON text, imports the
# results into the destination folder -- and the start of copycontent().
# Radio values -3/-4 select Admin/Global folders and switch on adminmode.
str(e)) return dialog = findReplaceCopyDialog(uniquecategoriesfrom, uniquecategoriesto) dialog.exec() dialog.show() if str(dialog.result()) == '1': replacelist = dialog.getresults() logger.info(replacelist) if len(replacelist) > 0: newcontents = [] for content in contents: for entry in replacelist: contentstring = json.dumps(content) contentstring = contentstring.replace(str(entry['from']), str(entry['to'])) logger.info(contentstring) newcontents.append(json.loads(contentstring)) else: newcontents = contents try: tofolderid = ContentListWidgetTo.currentcontent['id'] for newcontent in newcontents: status = tosumo.import_content_job_sync(tofolderid, newcontent, adminmode=toadminmode) self.updatecontentlist(ContentListWidgetTo, tourl, toid, tokey, toradioselected, todirectorylabel) return except Exception as e: logger.exception(e) self.errorbox('Something went wrong with the Destination:\n\n' + str(e)) return else: return else: self.errorbox('You have not made any selections.') return return def copycontent(self, ContentListWidgetFrom, ContentListWidgetTo, fromurl, fromid, fromkey, tourl, toid, tokey, fromradioselected, toradioselected, todirectorylabel): logger.info("Copying Content") if toradioselected == -3 or toradioselected == -4: #Admin or Global folders selected toadminmode=True else: toadminmode=False if fromradioselected == -3 or fromradioselected == -4: #Admin or Global folders selected fromadminmode=True else: fromadminmode=False try: selecteditems = ContentListWidgetFrom.selectedItems() if len(selecteditems) > 0: # make sure something was selected fromsumo = SumoLogic(fromid, fromkey, endpoint=fromurl) tosumo = SumoLogic(toid, tokey, endpoint=tourl) currentdir = ContentListWidgetTo.currentdirlist[-1] tofolderid = ContentListWidgetTo.currentcontent['id'] for selecteditem in selecteditems: for child in ContentListWidgetFrom.currentcontent['children']: if child['name'] == str(selecteditem.text()): item_id = child['id'] content =
# copycontent() (continued) -- exports each selected child by id from the
# source org and imports it into the destination folder; create_folder() --
# prompts for a name, rejects duplicates in the current folder, then creates
# it (requires the list to be up to date); delete_content() begins.
fromsumo.export_content_job_sync(item_id, adminmode=fromadminmode) status = tosumo.import_content_job_sync(tofolderid, content, adminmode=toadminmode) self.updatecontentlist(ContentListWidgetTo, tourl, toid, tokey, toradioselected, todirectorylabel) return else: self.errorbox('You have not made any selections.') return except Exception as e: logger.exception(e) self.errorbox('Something went wrong:\n\n' + str(e)) return def create_folder(self, ContentListWidget, url, id, key, radioselected, directorylabel): if ContentListWidget.updated == True: if radioselected == -3 or radioselected == -4: # Admin or Global folders selected adminmode = True else: adminmode = False message = ''' Please enter the name of the folder you wish to create: ''' text, result = QtWidgets.QInputDialog.getText(self, 'Create Folder...', message) if result: for item in ContentListWidget.currentcontent['children']: if item['name'] == str(text): self.errorbox('That Directory Name Already Exists!') return try: logger.info("Creating New Folder in Personal Folder Tree") sumo = SumoLogic(id, key, endpoint=url) error = sumo.create_folder(str(text), str(ContentListWidget.currentcontent['id']), adminmode=adminmode) self.updatecontentlist(ContentListWidget, url, id, key, radioselected, directorylabel) return except Exception as e: logger.exception(e) self.errorbox('Something went wrong:\n\n' + str(e)) else: self.errorbox("Please update the directory list before trying to create a new folder.") return def delete_content(self, ContentListWidget, url, id, key, radioselected, directorylabel): logger.info("Deleting Content") if radioselected == -3 or radioselected == -4: #Admin or Global folders selected adminmode=True else: adminmode=False selecteditems = ContentListWidget.selectedItems() if len(selecteditems) > 0: # make sure something was selected message = "You are about to delete the following item(s):\n\n" for selecteditem in selecteditems: message = message + str(selecteditem.text()) + "\n" message =
# delete_content() (continued) -- requires the user to type "DELETE" into an
# input dialog before deleting each selected item; contentradiobuttonchanged()
# resets the directory stack and refreshes; togglecontentbuttons()
# enables/disables one side's copy/new/delete/backup/restore buttons;
# updatecontentlist() begins.
message + ''' This is exceedingly DANGEROUS!!!! Please be VERY, VERY, VERY sure you want to do this! You could lose quite a bit of work if you delete the wrong thing(s). If you are absolutely sure, type "DELETE" in the box below. ''' text, result = QtWidgets.QInputDialog.getText(self, 'Warning!!', message) if (result and (str(text) == 'DELETE')): try: sumo = SumoLogic(id, key, endpoint=url) for selecteditem in selecteditems: for child in ContentListWidget.currentcontent['children']: if child['name'] == str(selecteditem.text()): item_id = child['id'] result = sumo.delete_content_job_sync(item_id, adminmode=adminmode) self.updatecontentlist(ContentListWidget, url, id, key, radioselected, directorylabel) return except Exception as e: logger.exception(e) self.errorbox('Something went wrong:\n\n' + str(e)) else: self.errorbox('You need to select something before you can delete it.') return def contentradiobuttonchanged(self, ContentListWidget,url, id, key, radioselected, directorylabel, pushButtonContentDelete): ContentListWidget.currentdirlist = [] self.updatecontentlist(ContentListWidget, url, id, key, radioselected, directorylabel) return def togglecontentbuttons(self, side, state): if side == 'left': self.pushButtonContentCopyRightToLeft.setEnabled(state) self.pushButtonContentFindReplaceCopyRightToLeft.setEnabled(state) self.pushButtonContentNewFolderLeft.setEnabled(state) self.pushButtonContentDeleteLeft.setEnabled(state) self.pushButtonContentBackupLeft.setEnabled(state) self.pushButtonContentRestoreLeft.setEnabled(state) elif side == 'right': self.pushButtonContentCopyLeftToRight.setEnabled(state) self.pushButtonContentFindReplaceCopyLeftToRight.setEnabled(state) self.pushButtonContentNewFolderRight.setEnabled(state) self.pushButtonContentDeleteRight.setEnabled(state) self.pushButtonContentBackupRight.setEnabled(state) self.pushButtonContentRestoreRight.setEnabled(state) def updatecontentlist(self, ContentListWidget, url, id, key, radioselected, directorylabel):
# updatecontentlist() (continued) -- (re)loads the current folder: Personal
# (radio -2), Global (-3, admin mode; the response's "data" key is renamed to
# "children" for consistency), or Admin Recommended; otherwise it re-fetches
# the directory saved at the top of the widget's currentdirlist stack.
sumo = SumoLogic(id, key, endpoint=url) if ContentListWidget.currentdirlist: currentdir = ContentListWidget.currentdirlist[-1] else: currentdir = {'name': None, 'id': 'TOP'} try: if (not ContentListWidget.currentcontent) or (currentdir['id'] == 'TOP'): if radioselected == -2: # if "Personal Folder" radio button is selected logger.info("Updating Personal Folder List") ContentListWidget.currentcontent = sumo.get_personal_folder() ContentListWidget.currentdirlist = [] dir = {'name': 'Personal Folder', 'id': 'TOP'} ContentListWidget.currentdirlist.append(dir) if 'children' in ContentListWidget.currentcontent: self.updatecontentlistwidget(ContentListWidget, url, id, key, radioselected, directorylabel) else: self.errorbox('Incorrect Credentials or Wrong Endpoint.') elif radioselected == -3: # if "Global Folders" radio button is selected logger.info("Updating Global Folder List") ContentListWidget.currentcontent = sumo.get_global_folder_sync(adminmode=True) # Rename dict key from "data" to "children" for consistency ContentListWidget.currentcontent['children'] = ContentListWidget.currentcontent.pop('data') ContentListWidget.currentdirlist = [] dir = {'name': 'Global Folders', 'id': 'TOP'} ContentListWidget.currentdirlist.append(dir) if 'children' in ContentListWidget.currentcontent: self.updatecontentlistwidget(ContentListWidget, url, id, key, radioselected, directorylabel) else: self.errorbox('Incorrect Credentials or Wrong Endpoint.') else: # "Admin Folders" must be selected logger.info("Updating Admin Folder List") ContentListWidget.currentcontent = sumo.get_admin_folder_sync(adminmode=False) ContentListWidget.currentdirlist = [] dir = {'name': 'Admin Recommended', 'id': 'TOP'} ContentListWidget.currentdirlist.append(dir) if 'children' in ContentListWidget.currentcontent: self.updatecontentlistwidget(ContentListWidget, url, id, key, radioselected, directorylabel) else: self.errorbox('Incorrect Credentials or Wrong Endpoint.') else: ContentListWidget.currentcontent =
# doubleclickedcontentlist() -- descends into a double-clicked Folder item and
# pushes it onto the directory stack; parentdircontentlist() -- ascends one
# level, popping the stack (or resetting it at 'TOP');
# updatecontentlistwidget() begins.
sumo.get_folder(currentdir['id']) self.updatecontentlistwidget(ContentListWidget, url, id, key, radioselected, directorylabel) except Exception as e: logger.exception(e) self.errorbox('Something went wrong:\n\n' + str(e)) return return def doubleclickedcontentlist(self, item, ContentListWidget, url, id, key, radioselected, directorylabel): logger.info("Going Down One Content Folder") sumo = SumoLogic(id, key, endpoint=url) currentdir = ContentListWidget.currentdirlist[-1] if radioselected == -3: adminmode=True else: adminmode=False try: for child in ContentListWidget.currentcontent['children']: if (child['name'] == item.text()) and (child['itemType'] == 'Folder'): ContentListWidget.currentcontent = sumo.get_folder(child['id'], adminmode=adminmode) dir = {'name': item.text(), 'id': child['id']} ContentListWidget.currentdirlist.append(dir) except Exception as e: logger.exception(e) self.errorbox('Something went wrong:\n\n' + str(e)) self.updatecontentlistwidget(ContentListWidget, url, id, key, radioselected, directorylabel) def parentdircontentlist(self, ContentListWidget, url, id, key, radioselected, directorylabel): if ContentListWidget.updated: logger.info("Going Up One Content Folder") sumo = SumoLogic(id, key, endpoint=url) currentdir = ContentListWidget.currentdirlist[-1] if currentdir['id'] != 'TOP': parentdir = ContentListWidget.currentdirlist[-2] else: return try: if parentdir['id'] == 'TOP': ContentListWidget.currentdirlist = [] self.updatecontentlist(ContentListWidget, url, id, key, radioselected, directorylabel) return else: ContentListWidget.currentdirlist.pop() ContentListWidget.currentcontent = sumo.get_folder(parentdir['id']) self.updatecontentlist(ContentListWidget, url, id, key, radioselected, directorylabel) return except Exception as e: logger.exception(e) self.errorbox('Something went wrong:\n\n' + str(e)) return def updatecontentlistwidget(self, ContentListWidget, url, id, key, radioselected, directorylabel): try: ContentListWidget.clear() sumo
# updatecontentlistwidget() (continued) -- repopulates the list widget with an
# icon per itemType (Folder/Search/Dashboard/Lookups, plain text fallthrough),
# rebuilds the breadcrumb label from the directory stack, and at the top of
# the Global tree disables that side's buttons (entries there are users, not
# content); backupcontent() begins.
= SumoLogic(id, key, endpoint=url) for object in ContentListWidget.currentcontent['children']: item_name = '' # if radioselected == -3: # logger.info("Getting User info for Global Folder") # user_info = sumo.get_user(object['createdBy']) # item_name = '[' + user_info['firstName'] + ' ' + user_info['lastName'] + ']' item_name = item_name + object['name'] if object['itemType'] == 'Folder': item = QtWidgets.QListWidgetItem(self.icons['Folder'], item_name) item.setIcon(self.icons['Folder']) ContentListWidget.addItem(item) # populate the list widget in the GUI elif object['itemType'] == 'Search': item = QtWidgets.QListWidgetItem(self.icons['Search'], item_name) item.setIcon(self.icons['Search']) ContentListWidget.addItem(item) # populate the list widget in the GUI elif object['itemType'] == 'Dashboard': item = QtWidgets.QListWidgetItem(self.icons['Dashboard'], item_name) item.setIcon(self.icons['Dashboard']) ContentListWidget.addItem(item) # populate the list widget in the GUI elif object['itemType'] == 'Lookups': item = QtWidgets.QListWidgetItem(self.icons['Dashboard'], item_name) item.setIcon(self.icons['Lookups']) ContentListWidget.addItem(item) # populate the list widget in the GUI else: ContentListWidget.addItem(item_name) # populate the list widget in the GUI with no icon (fallthrough) dirname = '' for dir in ContentListWidget.currentdirlist: dirname = dirname + '/' + dir['name'] directorylabel.setText(dirname) ContentListWidget.updated = True # if we are in the root (Top) of the global folders then we can't manipulate stuff as the entries are actually users, not content # so turn off the buttons until we change folder type or move down a level currentdir = ContentListWidget.currentdirlist[-1] if currentdir['id'] == 'TOP' and radioselected == -3: self.togglecontentbuttons(ContentListWidget.side, False) else: self.togglecontentbuttons(ContentListWidget.side, True) except Exception as e: logger.exception(e) return def backupcontent(self, ContentListWidget, url, id,
# backupcontent() (continued) -- exports each selected item and writes it to
# <item name>.json in a user-chosen directory (checked writable via
# os.access); restorecontent() begins (truncated at the end of this region,
# mid file-dialog call).
key, radioselected): logger.info("Backing Up Content") if radioselected == -3 or radioselected == -4: #Admin or Global folders selected adminmode=True else: adminmode=False selecteditems = ContentListWidget.selectedItems() if len(selecteditems) > 0: # make sure something was selected savepath = str(QtWidgets.QFileDialog.getExistingDirectory(self, "Select Backup Directory")) if os.access(savepath, os.W_OK): message = '' sumo = SumoLogic(id, key, endpoint=url) for selecteditem in selecteditems: for child in ContentListWidget.currentcontent['children']: if child['name'] == str(selecteditem.text()): item_id = child['id'] try: content = sumo.export_content_job_sync(item_id, adminmode=adminmode) savefilepath = pathlib.Path(savepath + r'/' + str(selecteditem.text()) + r'.json') if savefilepath: with savefilepath.open(mode='w') as filepointer: json.dump(content, filepointer) message = message + str(selecteditem.text()) + r'.json' + '\n' except Exception as e: logger.exception(e) self.errorbox('Something went wrong:\n\n' + str(e)) return self.infobox('Wrote files: \n\n' + message) else: self.errorbox("You don't have permissions to write to that directory") else: self.errorbox('No content selected.') return def restorecontent(self, ContentListWidget, url, id, key, radioselected, directorylabel): logger.info("Restoring Content") if ContentListWidget.updated == True: if 'id' in ContentListWidget.currentcontent: # make sure the current folder has a folder id filter = "JSON (*.json)" filelist, status = QtWidgets.QFileDialog.getOpenFileNames(self,
# NOTE(review): whitespace-mangled region (original newlines collapsed into
# spaces); code kept byte-identical, review comments only. Content: internals
# of thunder's SeriesLoader (Spark RDD loaders). This line carries the tail of
# a docstring plus the body of a binary-stack reader that lists input files,
# normalizes dtypes ('smallfloat' maps to the smallest adequate float type),
# and splits the flat files into roughly blockSize-d chunks.
a range of files specified by a glob-style expression using a single wildcard character '*'. dims: tuple of positive int Dimensions of input image data, ordered with the fastest-changing dimension first. dtype: dtype or dtype specifier, optional, default 'int16' Numpy dtype of input stack data newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat' Numpy dtype of output series data. Series data must be floating-point. Input data will be cast to the requested `newdtype` - see numpy `astype()` method. casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe' Casting method to pass on to numpy's `astype()` method; see numpy documentation for details. recursive: boolean, default False If true, will recursively descend directories rooted at dataPath, loading all files in the tree that have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems (not s3). Returns --------- pair of (RDD, ntimepoints) RDD: sequence of keys, values pairs (call using flatMap) RDD Key: tuple of int zero-based indicies of position within original image volume RDD Value: numpy array of datatype series of values at position across loaded image volumes ntimepoints: int number of time points in returned series, determined from number of stack files found at dataPath newDtype: string string representation of numpy data type of returned blocks """ dataPath = self.__normalizeDatafilePattern(dataPath, ext) blockSize = parseMemoryString(blockSize) totalDim = reduce(lambda x_, y_: x_*y_, dims) dtype = dtypeFunc(dtype) if newDtype is None or newDtype == '': newDtype = str(dtype) elif newDtype == 'smallfloat': newDtype = str(smallestFloatType(dtype)) else: newDtype = str(newDtype) reader = getFileReaderForPath(dataPath)(awsCredentialsOverride=self.awsCredentialsOverride) filenames = reader.list(dataPath, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive) if not filenames: raise
# Block-size computation (continued): for >=3-D stacks it picks a per-plane
# block count that divides the plane evenly so blocks never straddle planes;
# readBlock() then reads the same byte range from every file, stacks the
# buffers into a linindex-x-time array, and emits (subscript-key, series)
# pairs via Fortran-order unravel_index; the RDD maps readBlock over blocks.
IOError("No files found for path '%s'" % dataPath) dataSize = totalDim * len(filenames) * dtype.itemsize nblocks = max(dataSize / blockSize, 1) # integer division if len(dims) >= 3: # for 3D stacks, do calculations to ensure that # different planes appear in distinct files blocksPerPlane = max(nblocks / dims[-1], 1) pixPerPlane = reduce(lambda x_, y_: x_*y_, dims[:-1]) # all but last dimension # get the greatest number of blocks in a plane (up to as many as requested) that still divide the plane # evenly. This will always be at least one. kUpdated = [x for x in range(1, blocksPerPlane+1) if not pixPerPlane % x][-1] nblocks = kUpdated * dims[-1] blockSizePerStack = (totalDim / nblocks) * dtype.itemsize else: # otherwise just round to make contents divide into nearly even blocks blockSizePerStack = int(math.ceil(totalDim / float(nblocks))) nblocks = int(math.ceil(totalDim / float(blockSizePerStack))) blockSizePerStack *= dtype.itemsize fileSize = totalDim * dtype.itemsize def readBlock(blockNum): # copy size out from closure; will modify later: blockSizePerStack_ = blockSizePerStack # get start position for this block position = blockNum * blockSizePerStack_ # adjust if at end of file if (position + blockSizePerStack_) > fileSize: blockSizePerStack_ = int(fileSize - position) # loop over files, loading one block from each bufs = [] for fname in filenames: buf = reader.read(fname, startOffset=position, size=blockSizePerStack_) bufs.append(frombuffer(buf, dtype=dtype)) buf = vstack(bufs).T # dimensions are now linindex x time (images) del bufs buf = buf.astype(newDtype, casting=casting, copy=False) # append subscript keys based on dimensions itemPosition = position / dtype.itemsize itemBlocksize = blockSizePerStack_ / dtype.itemsize linearIdx = arange(itemPosition, itemPosition + itemBlocksize) # zero-based keys = zip(*map(tuple, unravel_index(linearIdx, dims, order='F'))) return zip(keys, buf) # map over blocks return (self.sc.parallelize(range(0, nblocks),
# __readMetadataFromFirstPageOfMultiTif() -- parses TIFF IFDs from the first
# file: requires a luminance image, walks all pages to count planes, and maps
# SampleFormat + BitsPerSample (8/16/32/64 only) to a dtype string;
# _getSeriesBlocksFromMultiTif() begins.
nblocks).flatMap(lambda bn: readBlock(bn)), len(filenames), newDtype) @staticmethod def __readMetadataFromFirstPageOfMultiTif(reader, filePath): import thunder.rdds.fileio.multitif as multitif # read first page of first file to get expected image size tiffFP = reader.open(filePath) tiffParser = multitif.TiffParser(tiffFP, debug=False) tiffHeaders = multitif.TiffData() tiffParser.parseFileHeader(destinationTiff=tiffHeaders) firstIfd = tiffParser.parseNextImageFileDirectory(destinationTiff=tiffHeaders) if not firstIfd.isLuminanceImage(): raise ValueError(("File %s does not appear to be a luminance " % filePath) + "(greyscale or bilevel) TIF image, " + "which are the only types currently supported") # keep reading pages until we reach the end of the file, in order to get number of planes: while tiffParser.parseNextImageFileDirectory(destinationTiff=tiffHeaders): pass # get dimensions npages = len(tiffHeaders.ifds) height = firstIfd.getImageHeight() width = firstIfd.getImageWidth() # get datatype bitsPerSample = firstIfd.getBitsPerSample() if not (bitsPerSample in (8, 16, 32, 64)): raise ValueError("Only 8, 16, 32, or 64 bit per pixel TIF images are supported, got %d" % bitsPerSample) sampleFormat = firstIfd.getSampleFormat() if sampleFormat == multitif.SAMPLE_FORMAT_UINT: dtStr = 'uint' elif sampleFormat == multitif.SAMPLE_FORMAT_INT: dtStr = 'int' elif sampleFormat == multitif.SAMPLE_FORMAT_FLOAT: dtStr = 'float' else: raise ValueError("Unknown TIF SampleFormat tag value %d, should be 1, 2, or 3 for uint, int, or float" % sampleFormat) dtype = dtStr+str(bitsPerSample) return height, width, npages, dtype def _getSeriesBlocksFromMultiTif(self, dataPath, ext="tif", blockSize="150M", newDtype='smallfloat', casting='safe', startIdx=None, stopIdx=None, recursive=False): import thunder.rdds.fileio.multitif as multitif import itertools from PIL import Image import io dataPath = self.__normalizeDatafilePattern(dataPath, ext) blockSize = parseMemoryString(blockSize) reader =
# _getSeriesBlocksFromMultiTif() (continued) -- lists files, detects pillow
# vs old PIL to pick a conversion function, reads TIFF metadata from the
# first file, rejects signed-int TIFFs, and halves the per-plane block size
# until it drops under 2x the requested blockSize.
getFileReaderForPath(dataPath)(awsCredentialsOverride=self.awsCredentialsOverride) filenames = reader.list(dataPath, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive) if not filenames: raise IOError("No files found for path '%s'" % dataPath) ntimepoints = len(filenames) doMinimizeReads = dataPath.lower().startswith("s3") or dataPath.lower().startswith("gs") # check PIL version to see whether it is actually pillow or indeed old PIL and choose # conversion function appropriately. See ImagesLoader.fromMultipageTif and common.pil_to_array # for more explanation. isPillow = hasattr(Image, "PILLOW_VERSION") if isPillow: conversionFcn = array # use numpy's array() function else: from thunder.utils.common import pil_to_array conversionFcn = pil_to_array # use our modified version of matplotlib's pil_to_array height, width, npages, dtype = SeriesLoader.__readMetadataFromFirstPageOfMultiTif(reader, filenames[0]) if dtype.startswith('int'): raise ValueError('Signed integer tiff images are not supported in SeriesLoader (shuffle=False);' + ' please try loading as Images (shuffle=True)') pixelBytesize = dtypeFunc(dtype).itemsize if newDtype is None or str(newDtype) == '': newDtype = str(dtype) elif newDtype == 'smallfloat': newDtype = str(smallestFloatType(dtype)) else: newDtype = str(newDtype) # intialize at one block per plane bytesPerPlane = height * width * pixelBytesize * ntimepoints bytesPerBlock = bytesPerPlane blocksPerPlane = 1 # keep dividing while cutting our size in half still leaves us bigger than the requested size # should end up no more than 2x blockSize.
# readBlockFromTiff() -- per (plane, block) key, re-opens every file; on
# s3/gs paths it repacks the single page into an in-memory one-page TIFF
# (multitif.packSinglePage) to avoid many small remote reads, otherwise it
# seeks to the page with PIL directly, then slices the raveled plane into
# this block's pixel range.
while bytesPerBlock >= blockSize * 2: bytesPerBlock /= 2 blocksPerPlane *= 2 blocklenPixels = max((height * width) / blocksPerPlane, 1) # integer division while blocksPerPlane * blocklenPixels < height * width: # make sure we're reading the plane fully blocksPerPlane += 1 # prevent bringing in self in closure: awsCredentialsOverride = self.awsCredentialsOverride # keys will be planeidx, blockidx: keys = list(itertools.product(xrange(npages), xrange(blocksPerPlane))) def readBlockFromTiff(planeIdxBlockIdx): planeIdx, blockIdx = planeIdxBlockIdx blocks = [] planeShape = None blockStart = None blockEnd = None for fname in filenames: reader_ = getFileReaderForPath(fname)(awsCredentialsOverride=awsCredentialsOverride) fp = reader_.open(fname) try: if doMinimizeReads: # use multitif module to generate a fake, in-memory # one-page tif file. the advantage of this is that it # cuts way down on the many small reads that PIL/pillow # will make otherwise, which would be a problem for s3 # or Google Storage tiffParser_ = multitif.TiffParser(fp, debug=False) tiffFilebuffer = multitif.packSinglePage(tiffParser_, pageIdx=planeIdx) byteBuf = io.BytesIO(tiffFilebuffer) try: pilImg = Image.open(byteBuf) ary = conversionFcn(pilImg).T finally: byteBuf.close() del tiffFilebuffer, tiffParser_, pilImg, byteBuf else: # read tif using PIL directly pilImg = Image.open(fp) pilImg.seek(planeIdx) ary = conversionFcn(pilImg).T del pilImg if not planeShape: planeShape = ary.shape[:] blockStart = blockIdx * blocklenPixels blockEnd = min(blockStart+blocklenPixels, planeShape[0]*planeShape[1]) blocks.append(ary.ravel(order='C')[blockStart:blockEnd]) del ary finally: fp.close() buf = vstack(blocks).T # dimensions are now linindex x time (images) del blocks buf = buf.astype(newDtype, casting=casting, copy=False) # append subscript keys based on dimensions linearIdx = arange(blockStart, blockEnd) # zero-based seriesKeys = zip(*map(tuple, unravel_index(linearIdx, planeShape, order='C'))) # add plane
# Series keys are reversed and, for multi-page TIFFs, get the plane index
# appended; the RDD maps readBlockFromTiff over (plane, block) keys and
# returns it with (dims, ntimepoints, newDtype) metadata. fromStack() begins
# (its docstring is truncated at the end of this region).
index to end of keys if npages > 1: seriesKeys = [tuple(list(keys_)[::-1]+[planeIdx]) for keys_ in seriesKeys] else: seriesKeys = [tuple(list(keys_)[::-1]) for keys_ in seriesKeys] return zip(seriesKeys, buf) # map over blocks rdd = self.sc.parallelize(keys, len(keys)).flatMap(readBlockFromTiff) if npages > 1: dims = (npages, width, height) else: dims = (width, height) metadata = (dims, ntimepoints, newDtype) return rdd, metadata def fromStack(self, dataPath, dims, ext="stack", blockSize="150M", dtype='int16', newDtype='smallfloat', casting='safe', startIdx=None, stopIdx=None, recursive=False): """Load a Series object directly from binary image stack files. Parameters ---------- dataPath: string Path to data files or directory, specified as either a local filesystem
<reponame>SamKaiYang/ros_modbus_nex<gh_stars>0 ########################################################################### # This software is graciously provided by HumaRobotics # under the BSD License on # github: https://github.com/Humarobotics/modbus_wrapper # HumaRobotics is a trademark of Generation Robots. # www.humarobotics.com # # Copyright (c) 2013, Generation Robots. # All rights reserved. # www.generationrobots.com # # This wrapper package is based on the pymodbus library developed by: # <NAME> # github: https://github.com/bashwork/pymodbus # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE. 
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.

import rospy

# pymodbus is a hard dependency; abort with an install hint if it is missing.
# NOTE(review): Python 2 syntax throughout (`except Exception,e`, print
# statements, xrange) — this module only runs under Python 2 / ROS 1.
try:
    from pymodbus.client.sync import ModbusTcpClient
except Exception,e:
    print "pymodbus does not seem to be installed.\nInstall it by:\nsudo apt-get install python-pymodbus"
    print e
    exit()
from std_msgs.msg import Int32MultiArray as HoldingRegister
from post_threading import Post
from threading import Lock

# Default register layout: 20 readable holding registers starting at 40000
# and 20 writeable holding registers starting at 40020 (classic 4xxxx
# addressing). Instances can override start/count via the setters below.
NUM_REGISTERS = 20
ADDRESS_READ_START = 40000
ADDRESS_WRITE_START = 40020

class ModbusWrapperClient():
    """
        Wrapper that integrates python modbus into standardized ros msgs.
        The wrapper is able to read from and write to a standard modbus tcp/ip server.
    """
    def __init__(self,host,port=502,rate=50,reset_registers=True,sub_topic="modbus_wrapper/output",pub_topic="modbus_wrapper/input"):
        """
            Use subscribers and publisher to communicate with the modbus server. Check the scripts for example code.

            :param host: Contains the IP address of the modbus server
            :type host: string
            :param port: The port number, where the modbus server is runnning
            :type port: integer
            :param rate: How often the registers on the modbusserver should be read per second
            :type rate: float
            :param reset_registers: Defines if the holding registers should be reset to 0 after they have been read. Only possible if they are writeable
            :type reset_registers: bool
            :param sub_topic: Topic this wrapper subscribes to; received arrays are written to the server's registers
            :type sub_topic: string
            :param pub_topic: Latched topic on which the read registers are published
            :type pub_topic: string
            :raises Exception: re-raised from ModbusTcpClient if the connection cannot be created
        """
        try:
            self.client = ModbusTcpClient(host,port)
        except Exception, e:
            rospy.logwarn("Could not get a modbus connection to the host modbus. %s", str(e))
            raise e
            return  # NOTE(review): unreachable — `raise e` above always exits
        self.__rate = rate
        # NOTE(review): 1/rate is *integer* division under Python 2 when rate
        # is an int (default 50 -> 0); __reading_delay is not read below, the
        # loops use rospy.Rate(self.__rate) instead.
        self.__reading_delay = 1/rate
        self.post = Post(self)  # helper that posts method calls to a background thread
        self.__reset_registers = reset_registers
        # readable holding registers: start address + count
        self.__reading_register_start = 0
        self.__num_reading_registers = 20
        # add input registers
        self.__read_input_register_start = 0
        self.__num_read_input_registers = 20
        # self.input_size = 16
        # last published register snapshot; loops publish only on change
        self.__input = HoldingRegister()
        self.__input.data = [0 for i in xrange(self.__num_reading_registers )]
        # writeable holding registers: start address + count
        self.__writing_registers_start = ADDRESS_WRITE_START
        self.__num_writing_registers = 20
        # self.output_size = 16
        # cache of the last written values, used by __updateModbusOutput to
        # skip redundant writes (see NOTE in __writeRegisters: never refreshed)
        self.__output = [None for i in range(self.__num_writing_registers)]
        self.__last_output_time = rospy.get_time()
        self.__mutex = Lock()  # serializes every modbus read/write access
        self.__sub = rospy.Subscriber(sub_topic,HoldingRegister,self.__updateModbusOutput,queue_size=500)
        self.__pub = rospy.Publisher(pub_topic,HoldingRegister,queue_size=500, latch=True)
        # closeConnection is presumably defined later in this class — not
        # visible in this chunk; verify before relying on it.
        rospy.on_shutdown(self.closeConnection)

    def startListening(self):
        """
            Non blocking call for starting the listener for the readable modbus server registers
        """
        #start reading the modbus
        # Post runs the (name-mangled) _ModbusWrapperClient__updateModbusInput
        # loop in its own thread, so this call returns immediately.
        self.post.__updateModbusInput()

    # add start read input register
    def start_readinput_Listening(self):
        """
            Non blocking call for starting the listener for the readable modbus input registers
        """
        #start reading the modbus
        self.post.__updateModbusReadInput()

    def stopListening(self):
        """
            Stops the listener loop
        """
        self.stop_listener = True
        # busy-wait until the listener thread acknowledges the stop flag
        while not rospy.is_shutdown() and self.listener_stopped is False:
            rospy.sleep(0.01)

    def setReadingRegisters(self,start,num_registers):
        """
            Sets the start address of the registers which should be read and their number
            :param start: First register that is readable
            :type start: int
            :param num_registers: Amount of readable registers
            :type num_registers: int
        """
        self.__reading_register_start = start
        self.__num_reading_registers = num_registers

    def setReadingInputRegisters(self,start,num_registers):
        """
            Sets the start address of the input registers which should be read and their number
            :param start: First register that is readable
            :type start: int
            :param num_registers: Amount of readable registers
            :type num_registers: int
        """
        self.__read_input_register_start = start
        self.__num_read_input_registers = num_registers

    def setWritingRegisters(self,start,num_registers):
        """
            Sets the start address of the registers which are writeable and their number
            :param start: First register that is writeable
            :type start: int
            :param num_registers: Amount of writeable registers
            :type num_registers: int
        """
        self.__writing_registers_start = start
        self.__num_writing_registers = num_registers

    def getReadingRegisters(self):
        """
            :return: Returns the first address of the readable registers and the number of registers
            :rtype: int,int
        """
        return self.__reading_register_start,self.__num_reading_registers

    def getWritingRegisters(self):
        """
            :return: Returns the first address of the writeable registers and the number of registers
            :rtype: int,int
        """
        return self.__writing_registers_start,self.__num_writing_registers

    def __updateModbusInput(self,delay=0):
        """
            Loop that is listening to the readable modbus registers and publishes it on a topic
            :param delay: The delay time until the loop starts
            :type delay: float
        """
        rospy.sleep(delay)
        self.listener_stopped = False
        self.stop_listener = False
        update = True
        while not rospy.is_shutdown() and self.stop_listener is False:
            try:
                if not rospy.is_shutdown() :
                    tmp = self.readRegisters()
                    if tmp is None:
                        # read failed — back off before retrying
                        rospy.sleep(2)
                        continue
                    # rospy.logwarn("__updateModbusInput tmp is %s ", str(tmp))
                    # rospy.logwarn("__updateModbusInput self.__input.data is %s ", str(self.__input.data))
                    # republish only when the register contents changed
                    if tmp != self.__input.data:
                        update = True
                        self.__input.data = tmp
                    else:
                        update = False
            except Exception,e:
                rospy.logwarn("Could not read holding register. %s", str(e))
                raise e
                rospy.sleep(2)  # NOTE(review): unreachable after `raise e`
            if update:
                if self.__pub.get_num_connections() > 0:
                    try:
                        self.__pub.publish(self.__input)
                    except Exception,e:
                        rospy.logwarn("Could not publish message. Exception: %s",str(e))
                        raise e
            rospy.Rate(self.__rate).sleep()
        self.listener_stopped = True

    # add read input registers
    # NOTE(review): near-duplicate of __updateModbusInput, differing only in
    # the read call (read_input_Registers vs readRegisters) — a candidate for
    # consolidation once the file can be edited as a whole.
    def __updateModbusReadInput(self,delay=0):
        """
            Loop that is listening to the readable modbus input registers and publishes it on a topic
            :param delay: The delay time until the loop starts
            :type delay: float
        """
        rospy.sleep(delay)
        self.listener_stopped = False
        self.stop_listener = False
        update = True
        while not rospy.is_shutdown() and self.stop_listener is False:
            try:
                if not rospy.is_shutdown() :
                    tmp = self.read_input_Registers()
                    if tmp is None:
                        # read failed — back off before retrying
                        rospy.sleep(2)
                        continue
                    # rospy.logwarn("__updateModbusReadInput tmp is %s ", str(tmp))
                    # rospy.logwarn("__updateModbusReadInput self.__input.data is %s ", str(self.__input.data))
                    if tmp != self.__input.data:
                        update = True
                        self.__input.data = tmp
                    else:
                        update = False
            except Exception,e:
                rospy.logwarn("Could not read holding register. %s", str(e))
                raise e
                rospy.sleep(2)  # NOTE(review): unreachable after `raise e`
            if update:
                if self.__pub.get_num_connections() > 0:
                    try:
                        self.__pub.publish(self.__input)
                    except Exception,e:
                        rospy.logwarn("Could not publish message. Exception: %s",str(e))
                        raise e
            rospy.Rate(self.__rate).sleep()
        self.listener_stopped = True

    def __updateModbusOutput(self,msg):
        """
            Callback from the subscriber to update the writeable modbus registers
            :param msg: value of the new registers
            :type msg: std_msgs.Int32MultiArray
        """
        output_changed = False
        # Compare against the cached last-written values and skip the write
        # if nothing changed.
        # NOTE(review): indexes msg.data up to __num_writing_registers-1 —
        # assumes publishers always send full-length arrays (IndexError
        # otherwise). Also, __output is never refreshed (see __writeRegisters),
        # so after the first non-None comparison every message is written.
        for index in xrange(self.__num_writing_registers):
            if self.__output[index] != msg.data[index]:
                output_changed = True
                break
        if not output_changed:
            return
        self.__writeRegisters(self.__writing_registers_start,msg.data)

    def __writeRegisters(self,address,values):
        """
            Writes modbus registers
            :param address: First address of the values to write
            :type address: int
            :param values: Values to write
            :type values: list
        """
        with self.__mutex:
            try:
                if not rospy.is_shutdown() :
                    # print "writing address",address,"value"
                    self.client.write_registers(address, values)
                    # NOTE(review): assigns `self.output`, NOT the name-mangled
                    # `self.__output` cache that __updateModbusOutput compares
                    # against — the change-detection cache is never updated.
                    self.output = values
            except Exception, e:
                rospy.logwarn("Could not write values %s to address %d. Exception %s",str(values),address, str(e))
                raise e

    def readRegisters(self,address=None,num_registers=None):
        """
            Reads modbus registers
            :param address: First address of the registers to read (defaults to the configured reading start address)
            :type address: int
            :param num_registers: Amount of registers to read (defaults to the configured count)
            :type num_registers: int
            :return: the register values, or raises on communication failure
        """
        if address is None:
            address = self.__reading_register_start
        if num_registers is None:
            num_registers = self.__num_reading_registers
        tmp = None
        with self.__mutex:
            try:
                tmp = self.client.read_holding_registers(address,num_registers).registers
            except Exception, e:
                rospy.logwarn("Could not read on address %d. Exception: %s",address,str(e))
                raise e
            # optionally zero the registers after reading so the server can
            # detect that they were consumed (requires writeable registers)
            if self.__reset_registers:
                try:
                    self.client.write_registers(address, [0 for i in xrange(num_registers)])
                except Exception, e:
                    rospy.logwarn("Could not write to address %d. Exception: %s", address,str(e))
                    raise e
        return tmp

    # add input registers
    def read_input_Registers(self,address=None,num_registers=None):
        """
            Reads modbus registers
            :param address: First address of the registers to read
            :type address: int
            :param num_registers: Amount of registers to read
            :type
return tuple(result) TIFF_PHOTOMETRICS = { 0: 'miniswhite', 1: 'minisblack', 2: 'rgb', 3: 'palette', 4: 'mask', 5: 'separated', 6: 'cielab', 7: 'icclab', 8: 'itulab', 32844: 'logl', 32845: 'logluv', } TIFF_COMPESSIONS = { 1: None, 2: 'ccittrle', 3: 'ccittfax3', 4: 'ccittfax4', 5: 'lzw', 6: 'ojpeg', 7: 'jpeg', 8: 'adobe_deflate', 9: 't85', 10: 't43', 32766: 'next', 32771: 'ccittrlew', 32773: 'packbits', 32809: 'thunderscan', 32895: 'it8ctpad', 32896: 'it8lw', 32897: 'it8mp', 32898: 'it8bl', 32908: 'pixarfilm', 32909: 'pixarlog', 32946: 'deflate', 32947: 'dcs', 34661: 'jbig', 34676: 'sgilog', 34677: 'sgilog24', 34712: 'jp2000', 34713: 'nef', } TIFF_DECOMPESSORS = { None: lambda x: x, 'adobe_deflate': zlib.decompress, 'deflate': zlib.decompress, 'packbits': decodepackbits, 'lzw': decodelzw, } TIFF_DATA_TYPES = { 1: '1B', # BYTE 8-bit unsigned integer. 2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code; # the last byte must be NULL (binary zero). 3: '1H', # SHORT 16-bit (2-byte) unsigned integer 4: '1I', # LONG 32-bit (4-byte) unsigned integer. 5: '2I', # RATIONAL Two LONGs: the first represents the numerator of # a fraction; the second, the denominator. 6: '1b', # SBYTE An 8-bit signed (twos-complement) integer. 7: '1B', # UNDEFINED An 8-bit byte that may contain anything, # depending on the definition of the field. 8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer. 9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer. 10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator # of a fraction, the second the denominator. 11: '1f', # FLOAT Single precision (4-byte) IEEE format. 12: '1d', # DOUBLE Double precision (8-byte) IEEE format. 13: '1I', # IFD unsigned 4 byte IFD offset. 
#14: '', # UNICODE #15: '', # COMPLEX 16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff) 17: '1q', # SLONG8 signed 8 byte integer (BigTiff) 18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff) } TIFF_SAMPLE_FORMATS = { 1: 'uint', 2: 'int', 3: 'float', #4: 'void', #5: 'complex_int', 6: 'complex', } TIFF_SAMPLE_DTYPES = { ('uint', 1): '?', # bitmap ('uint', 2): 'B', ('uint', 3): 'B', ('uint', 4): 'B', ('uint', 5): 'B', ('uint', 6): 'B', ('uint', 7): 'B', ('uint', 8): 'B', ('uint', 9): 'H', ('uint', 10): 'H', ('uint', 11): 'H', ('uint', 12): 'H', ('uint', 13): 'H', ('uint', 14): 'H', ('uint', 15): 'H', ('uint', 16): 'H', ('uint', 17): 'I', ('uint', 18): 'I', ('uint', 19): 'I', ('uint', 20): 'I', ('uint', 21): 'I', ('uint', 22): 'I', ('uint', 23): 'I', ('uint', 24): 'I', ('uint', 25): 'I', ('uint', 26): 'I', ('uint', 27): 'I', ('uint', 28): 'I', ('uint', 29): 'I', ('uint', 30): 'I', ('uint', 31): 'I', ('uint', 32): 'I', ('uint', 64): 'Q', ('int', 8): 'b', ('int', 16): 'h', ('int', 32): 'i', ('int', 64): 'q', ('float', 16): 'e', ('float', 32): 'f', ('float', 64): 'd', ('complex', 64): 'F', ('complex', 128): 'D', ('uint', (5, 6, 5)): 'B', } TIFF_ORIENTATIONS = { 1: 'top_left', 2: 'top_right', 3: 'bottom_right', 4: 'bottom_left', 5: 'left_top', 6: 'right_top', 7: 'right_bottom', 8: 'left_bottom', } AXES_LABELS = { 'X': 'width', 'Y': 'height', 'Z': 'depth', 'S': 'sample', # rgb(a) 'P': 'plane', # page 'T': 'time', 'C': 'channel', # color, emission wavelength 'A': 'angle', 'F': 'phase', 'R': 'tile', # region, point 'H': 'lifetime', # histogram 'E': 'lambda', # excitation wavelength 'L': 'exposure', # lux 'V': 'event', 'Q': 'other', } AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items())) # NIH Image PicHeader v1.63 NIH_IMAGE_HEADER = [ ('fileid', 'a8'), ('nlines', 'i2'), ('pixelsperline', 'i2'), ('version', 'i2'), ('oldlutmode', 'i2'), ('oldncolors', 'i2'), ('colors', 'u1', (3, 32)), ('oldcolorstart', 'i2'), ('colorwidth', 'i2'), ('extracolors', 'u2', (6, 3)), 
('nextracolors', 'i2'), ('foregroundindex', 'i2'), ('backgroundindex', 'i2'), ('xscale', 'f8'), ('_x0', 'i2'), ('_x1', 'i2'), ('units_t', 'i2'), ('p1', [('x', 'i2'), ('y', 'i2')]), ('p2', [('x', 'i2'), ('y', 'i2')]), ('curvefit_t', 'i2'), ('ncoefficients', 'i2'), ('coeff', 'f8', 6), ('_um_len', 'u1'), ('um', 'a15'), ('_x2', 'u1'), ('binarypic', 'b1'), ('slicestart', 'i2'), ('sliceend', 'i2'), ('scalemagnification', 'f4'), ('nslices', 'i2'), ('slicespacing', 'f4'), ('currentslice', 'i2'), ('frameinterval', 'f4'), ('pixelaspectratio', 'f4'), ('colorstart', 'i2'), ('colorend', 'i2'), ('ncolors', 'i2'), ('fill1', '3u2'), ('fill2', '3u2'), ('colortable_t', 'u1'), ('lutmode_t', 'u1'), ('invertedtable', 'b1'), ('zeroclip', 'b1'), ('_xunit_len', 'u1'), ('xunit', 'a11'), ('stacktype_t', 'i2'), ] #NIH_COLORTABLE_TYPE = ( # 'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow', # 'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum') #NIH_LUTMODE_TYPE = ( # 'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale', # 'ColorLut', 'CustomGrayscale') #NIH_CURVEFIT_TYPE = ( # 'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit', # 'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated', # 'UncalibratedOD') #NIH_UNITS_TYPE = ( # 'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters', # 'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits') #NIH_STACKTYPE_TYPE = ( # 'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack') # MetaMorph STK tags MM_TAG_IDS = { 0: 'auto_scale', 1: 'min_scale', 2: 'max_scale', 3: 'spatial_calibration', #4: 'x_calibration', #5: 'y_calibration', #6: 'calibration_units', #7: 'name', 8: 'thresh_state', 9: 'thresh_state_red', 11: 'thresh_state_green', 12: 'thresh_state_blue', 13: 'thresh_state_lo', 14: 'thresh_state_hi', 15: 'zoom', #16: 'create_time', #17: 'last_saved_time', 18: 'current_buffer', 19: 'gray_fit', 20: 'gray_point_count', #21: 'gray_x', #22: 'gray_y', #23: 'gray_min', #24: 'gray_max', #25: 
'gray_unit_name', 26: 'standard_lut', 27: 'wavelength', #28: 'stage_position', #29: 'camera_chip_offset', #30: 'overlay_mask', #31: 'overlay_compress', #32: 'overlay', #33: 'special_overlay_mask', #34: 'special_overlay_compress', #35: 'special_overlay', 36: 'image_property', #37: 'stage_label', #38: 'autoscale_lo_info', #39: 'autoscale_hi_info', #40: 'absolute_z', #41: 'absolute_z_valid', #42: 'gamma', #43: 'gamma_red', #44: 'gamma_green', #45: 'gamma_blue', #46: 'camera_bin', 47: 'new_lut', #48: 'image_property_ex', 49: 'plane_property', #50: 'user_lut_table', 51: 'red_autoscale_info', #52: 'red_autoscale_lo_info', #53: 'red_autoscale_hi_info', 54: 'red_minscale_info', 55: 'red_maxscale_info', 56: 'green_autoscale_info', #57: 'green_autoscale_lo_info', #58: 'green_autoscale_hi_info', 59: 'green_minscale_info', 60: 'green_maxscale_info', 61: 'blue_autoscale_info', #62: 'blue_autoscale_lo_info', #63: 'blue_autoscale_hi_info', 64: 'blue_min_scale_info', 65: 'blue_max_scale_info', #66: 'overlay_plane_color' } # Olympus FluoView MM_DIMENSION = [ ('name', 'a16'), ('size', 'i4'), ('origin', 'f8'), ('resolution', 'f8'), ('unit', 'a64'), ] MM_HEADER = [ ('header_flag', 'i2'), ('image_type', 'u1'), ('image_name', 'a257'), ('offset_data', 'u4'), ('palette_size', 'i4'), ('offset_palette0', 'u4'), ('offset_palette1', 'u4'), ('comment_size', 'i4'), ('offset_comment', 'u4'), ('dimensions', MM_DIMENSION, 10), ('offset_position', 'u4'), ('map_type', 'i2'), ('map_min', 'f8'), ('map_max', 'f8'), ('min_value', 'f8'), ('max_value', 'f8'), ('offset_map', 'u4'), ('gamma', 'f8'), ('offset', 'f8'), ('gray_channel', MM_DIMENSION), ('offset_thumbnail', 'u4'), ('voice_field', 'i4'), ('offset_voice_field', 'u4'), ] # <NAME> LSM CZ_LSM_INFO = [ ('magic_number', 'i4'), ('structure_size', 'i4'), ('dimension_x', 'i4'), ('dimension_y', 'i4'), ('dimension_z', 'i4'), ('dimension_channels', 'i4'), ('dimension_time', 'i4'), ('dimension_data_type', 'i4'), ('thumbnail_x', 'i4'), ('thumbnail_y', 'i4'), 
('voxel_size_x', 'f8'), ('voxel_size_y', 'f8'), ('voxel_size_z', 'f8'), ('origin_x', 'f8'), ('origin_y', 'f8'), ('origin_z', 'f8'), ('scan_type', 'u2'), ('spectral_scan', 'u2'), ('data_type', 'u4'), ('offset_vector_overlay', 'u4'), ('offset_input_lut', 'u4'), ('offset_output_lut', 'u4'), ('offset_channel_colors', 'u4'), ('time_interval', 'f8'), ('offset_channel_data_types', 'u4'), ('offset_scan_information', 'u4'), ('offset_ks_data', 'u4'), ('offset_time_stamps', 'u4'), ('offset_event_list', 'u4'), ('offset_roi', 'u4'), ('offset_bleach_roi', 'u4'), ('offset_next_recording', 'u4'), ('display_aspect_x', 'f8'), ('display_aspect_y', 'f8'), ('display_aspect_z', 'f8'), ('display_aspect_time', 'f8'), ('offset_mean_of_roi_overlay', 'u4'), ('offset_topo_isoline_overlay', 'u4'), ('offset_topo_profile_overlay', 'u4'), ('offset_linescan_overlay', 'u4'), ('offset_toolbar_flags', 'u4'), ] # Import functions for LSM_INFO sub-records CZ_LSM_INFO_READERS = { 'scan_information': read_cz_lsm_scan_info, 'time_stamps': read_cz_lsm_time_stamps, 'event_list': read_cz_lsm_event_list, } # Map cz_lsm_info.scan_type to dimension order CZ_SCAN_TYPES = { 0: 'XYZCT', # x-y-z scan 1: 'XYZCT', # z scan (x-z plane) 2: 'XYZCT', # line scan 3: 'XYTCZ', # time series x-y 4: 'XYZTC', # time series x-z 5: 'XYTCZ', # time series 'Mean of ROIs' 6: 'XYZTC', # time series x-y-z 7: 'XYCTZ', # spline scan 8: 'XYCZT', # spline scan x-z 9: 'XYTCZ', # time series spline plane x-z 10: 'XYZCT', # point mode } # Map dimension codes to cz_lsm_info attribute CZ_DIMENSIONS = { 'X': 'dimension_x', 'Y': 'dimension_y', 'Z': 'dimension_z', 'C': 'dimension_channels', 'T': 'dimension_time', } # Descriptions of cz_lsm_info.data_type CZ_DATA_TYPES = { 0: 'varying data types', 2: '12 bit unsigned integer', 5: '32 bit float', } CZ_LSM_SCAN_INFO_ARRAYS = { 0x20000000: "tracks", 0x30000000: "lasers", 0x60000000: "detectionchannels", 0x80000000: "illuminationchannels", 0xa0000000: "beamsplitters", 0xc0000000: "datachannels", 
0x13000000: "markers", 0x11000000: "timers", } CZ_LSM_SCAN_INFO_STRUCTS = { 0x40000000: "tracks", 0x50000000: "lasers", 0x70000000: "detectionchannels", 0x90000000: "illuminationchannels", 0xb0000000: "beamsplitters", 0xd0000000: "datachannels", 0x14000000: "markers", 0x12000000: "timers", } CZ_LSM_SCAN_INFO_ATTRIBUTES = { 0x10000001: "name", 0x10000002: "description", 0x10000003: "notes", 0x10000004: "objective", 0x10000005: "processing_summary", 0x10000006: "special_scan_mode", 0x10000007: "oledb_recording_scan_type", 0x10000008: "oledb_recording_scan_mode", 0x10000009: "number_of_stacks", 0x1000000a: "lines_per_plane", 0x1000000b: "samples_per_line", 0x1000000c: "planes_per_volume", 0x1000000d: "images_width", 0x1000000e: "images_height", 0x1000000f: "images_number_planes", 0x10000010: "images_number_stacks", 0x10000011: "images_number_channels", 0x10000012: "linscan_xy_size", 0x10000013: "scan_direction", 0x10000014: "time_series", 0x10000015: "original_scan_data", 0x10000016: "zoom_x", 0x10000017: "zoom_y", 0x10000018: "zoom_z", 0x10000019: "sample_0x", 0x1000001a: "sample_0y", 0x1000001b: "sample_0z", 0x1000001c: "sample_spacing", 0x1000001d: "line_spacing", 0x1000001e: "plane_spacing", 0x1000001f: "plane_width", 0x10000020: "plane_height", 0x10000021: "volume_depth", 0x10000023: "nutation", 0x10000034: "rotation", 0x10000035: "precession", 0x10000036: "sample_0time", 0x10000037: "start_scan_trigger_in", 0x10000038: "start_scan_trigger_out", 0x10000039: "start_scan_event", 0x10000040: "start_scan_time", 0x10000041: "stop_scan_trigger_in", 0x10000042: "stop_scan_trigger_out", 0x10000043: "stop_scan_event", 0x10000044: "stop_scan_time", 0x10000045: "use_rois", 0x10000046: "use_reduced_memory_rois", 0x10000047: "user", 0x10000048: "use_bccorrection", 0x10000049: "position_bccorrection1", 0x10000050: "position_bccorrection2", 0x10000051: "interpolation_y", 0x10000052: "camera_binning", 0x10000053: "camera_supersampling", 0x10000054: "camera_frame_width", 
0x10000055: "camera_frame_height", 0x10000056: "camera_offset_x", 0x10000057: "camera_offset_y", # lasers 0x50000001: "name", 0x50000002: "acquire", 0x50000003: "power", # tracks 0x40000001: "multiplex_type", 0x40000002: "multiplex_order", 0x40000003: "sampling_mode", 0x40000004: "sampling_method", 0x40000005: "sampling_number", 0x40000006: "acquire", 0x40000007: "sample_observation_time", 0x4000000b: "time_between_stacks", 0x4000000c: "name", 0x4000000d: "collimator1_name", 0x4000000e: "collimator1_position", 0x4000000f: "collimator2_name", 0x40000010: "collimator2_position", 0x40000011: "is_bleach_track", 0x40000012: "is_bleach_after_scan_number", 0x40000013:
<reponame>alextselegidis/easyappointments-sdk # coding: utf-8 """ Easy!Appointments API These are the OpenAPI specs that describe the REST API of Easy!Appointments. # noqa: E501 OpenAPI spec version: 1.0.0 Contact: <EMAIL> Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import re # noqa: F401 # python 2 and python 3 compatibility library import six from swagger_client.api_client import ApiClient class CategoriesApi(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def categories_category_id_delete(self, category_id, **kwargs): # noqa: E501 """Delete a category # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.categories_category_id_delete(category_id, async_req=True) >>> result = thread.get() :param async_req bool :param int category_id: (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.categories_category_id_delete_with_http_info(category_id, **kwargs) # noqa: E501 else: (data) = self.categories_category_id_delete_with_http_info(category_id, **kwargs) # noqa: E501 return data def categories_category_id_delete_with_http_info(self, category_id, **kwargs): # noqa: E501 """Delete a category # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.categories_category_id_delete_with_http_info(category_id, async_req=True) >>> result = thread.get() :param async_req bool :param int category_id: (required) :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['category_id'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method categories_category_id_delete" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'category_id' is set if ('category_id' not in params or params['category_id'] is None): raise ValueError("Missing the required parameter `category_id` when calling `categories_category_id_delete`") # noqa: E501 collection_formats = {} path_params = {} if 'category_id' in params: path_params['categoryId'] = params['category_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['BasicAuth', 'BearerToken'] # noqa: E501 return self.api_client.call_api( '/categories/{categoryId}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def categories_category_id_get(self, category_id, **kwargs): # noqa: E501 """Get a 
category # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.categories_category_id_get(category_id, async_req=True) >>> result = thread.get() :param async_req bool :param int category_id: (required) :return: CategoryRecord If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.categories_category_id_get_with_http_info(category_id, **kwargs) # noqa: E501 else: (data) = self.categories_category_id_get_with_http_info(category_id, **kwargs) # noqa: E501 return data def categories_category_id_get_with_http_info(self, category_id, **kwargs): # noqa: E501 """Get a category # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.categories_category_id_get_with_http_info(category_id, async_req=True) >>> result = thread.get() :param async_req bool :param int category_id: (required) :return: CategoryRecord If the method is called asynchronously, returns the request thread. 
""" all_params = ['category_id'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method categories_category_id_get" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'category_id' is set if ('category_id' not in params or params['category_id'] is None): raise ValueError("Missing the required parameter `category_id` when calling `categories_category_id_get`") # noqa: E501 collection_formats = {} path_params = {} if 'category_id' in params: path_params['categoryId'] = params['category_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['BasicAuth', 'BearerToken'] # noqa: E501 return self.api_client.call_api( '/categories/{categoryId}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CategoryRecord', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def categories_category_id_put(self, body, category_id, **kwargs): # noqa: E501 """Update a category # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.categories_category_id_put(body, category_id, async_req=True) >>> result = thread.get() :param async_req bool :param CategoryPayload body: (required) :param int category_id: (required) :return: CategoryRecord If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.categories_category_id_put_with_http_info(body, category_id, **kwargs) # noqa: E501 else: (data) = self.categories_category_id_put_with_http_info(body, category_id, **kwargs) # noqa: E501 return data def categories_category_id_put_with_http_info(self, body, category_id, **kwargs): # noqa: E501 """Update a category # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.categories_category_id_put_with_http_info(body, category_id, async_req=True) >>> result = thread.get() :param async_req bool :param CategoryPayload body: (required) :param int category_id: (required) :return: CategoryRecord If the method is called asynchronously, returns the request thread. 
""" all_params = ['body', 'category_id'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method categories_category_id_put" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'body' is set if ('body' not in params or params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `categories_category_id_put`") # noqa: E501 # verify the required parameter 'category_id' is set if ('category_id' not in params or params['category_id'] is None): raise ValueError("Missing the required parameter `category_id` when calling `categories_category_id_put`") # noqa: E501 collection_formats = {} path_params = {} if 'category_id' in params: path_params['categoryId'] = params['category_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['BasicAuth', 'BearerToken'] # noqa: E501 return self.api_client.call_api( '/categories/{categoryId}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CategoryRecord', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), 
collection_formats=collection_formats) def categories_get(self, **kwargs): # noqa: E501 """Get all categories # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.categories_get(async_req=True) >>> result = thread.get() :param async_req bool :param int page: :param int length: :param str sort: :param str q: :param str fields: :param str _with: :return: CategoryCollection If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.categories_get_with_http_info(**kwargs) # noqa: E501 else: (data) = self.categories_get_with_http_info(**kwargs) # noqa: E501 return data def categories_get_with_http_info(self, **kwargs): # noqa: E501 """Get all categories # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.categories_get_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param int page: :param int length: :param str sort: :param str q: :param str fields: :param str _with: :return: CategoryCollection If the method is called asynchronously, returns the request thread. 
""" all_params = ['page', 'length', 'sort', 'q', 'fields', '_with'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method categories_get" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'page' in params: query_params.append(('page', params['page'])) # noqa: E501 if 'length' in params: query_params.append(('length', params['length'])) # noqa: E501 if 'sort' in params: query_params.append(('sort', params['sort'])) # noqa: E501 if 'q' in params: query_params.append(('q', params['q'])) # noqa: E501 if 'fields' in params: query_params.append(('fields', params['fields'])) # noqa: E501 if '_with' in params: query_params.append(('with', params['_with'])) # noqa: E501 header_params
in this course with this role. """ # If there is an existing registration for the course&person, modify it. # Also enroll this person in the site couse if they aren't already # and if this isn't the site course itself. # Optionally create their work folder (if it doesn't already exist) # # add tutors by using student role with registration.grade='tutor' is_tutor = rolename=='tutor' if rolename == 'tutor': rolename = 'student' if not datestring: datestring = str(Time()) with db.atomic(): (reg, created) = Registration.get_or_create( person = person, course = self) reg.role = Role.by_name(rolename) reg.grade = 'tutor' if is_tutor else '' reg.status = '' # if re-enrolling would have been 'drop' reg.date = datestring reg.save() if not self.name == 'Umber': Course.enroll_site(person, datestring=datestring) if create_work: # Create folder for student work within the course folder. # The absolute path for their student work folder is # e.g. course/students/johnsmith/ with its .access.yaml # & course/students/johnsmith/work/ student_abspath = os.path.join(self.abspath, 'students', person.username) Page.new_folder(student_abspath, user=person, accessdict= {'read':person.username, 'write':person.username}) work_abspath = os.path.join(student_abspath, 'work') Page.new_folder(work_abspath, user=person) # refresh students self._set_users() class Page(BaseModel): # --- path, filename, url definitions --- # With settings on my laptop development machine as # os_courses /Users/mahoney/academics/umber/courses # then for the 'notes/week1' file within a course at 'fall/math' , # the parts are # url: http://127.0.0.1:5000/ umber / fall/math / notes/week1 # protocol hostname url_base path................... # file: /Users/mahoney/academics/umber/courses / fall/math / notes/week1 # os_courses path................... # Following python's os.path phrasing, other terms used here are # basename last word in address (same as os.path.basename) # abspath e.g. 
/Users/mahoney/.../fall/math/notes/week1 # dirname e.g. /Users/mahoney/.../fall/math/notes # This url would have in its flask request object the attributes # request.url_root 'http://localhost:8090/' # request.path '/umber/fall/math/notes/week1' # # Note that Page.path (e.g. fall/math/notes/week1) # does not have a leading slash or contain the url_base, # while request.path (e.g. /umber/fall/math/notes/week1) does. # # The Page object will also contain various extra data # that isn't stored in the sql database but is instead # pulled from the filesystem. class Meta: db_table = 'Page' # Each course has some sys/* pages which get special treatment. # Also here are site/sys/* pages for editing users and courses, # which are only accessible within the 'site' course. system_pages = ('assignments', 'navigation', 'error', 'folder', 'grades', 'roster', 'user', 'users', 'course', 'courses', 'registration', 'newuser', 'newcourse') editable_system_pages = ('assignments', 'navigation', 'grades', 'user', 'course') page_id = PrimaryKeyField(db_column='page_id') html = TextField() html_lastmodified = TextField() notes = TextField() path = TextField(unique=True) course = ForeignKeyField(model=Course, db_column='course_id', to_field='course_id') _mime_types = None @staticmethod def new_folder(abspath, accessdict=None, user=None): """ Create a new folder with the given abspath. Add it into the github repo. Optionally create its .access.yaml file. """ if os.path.exists(abspath): # bail without doing anything of this already exists # print_debug(' new_folder {} already exists '.format(abspath)) return None try: os.makedirs(abspath) # makes intermediate folders if need be. except: # bail with error message if the OS system won't do it. print_debug(' os.makdir("{}") failed '.format(abspath)) return None # Add an empty .keep file in this new folder, # as a workaround to force git to include this new folder. # (Git pays attention to files, not folders.) 
open(os.path.join(abspath, '.keep'), 'w').close() # unix 'touch' # Create the new folder object. path = os.path.relpath(abspath, os_courses) folder = Page.get_from_path(path, user=user) if accessdict: # don't do a git commit here - wait to do whole folder folder.write_access_file(accessdict, do_git=False) gitlocal.add_commit(folder) return folder @classmethod def get_from_path(cls, path, revision=None, action=None, user=None): """ Get or create a Page and set up all its internal data i.e. course, file info, user permissions, etc """ (page, iscreated) = Page.get_or_create(path=path) if user == None: user = Person.get_anonymous() page.user = user page.action = action page.revision = revision page._setup_file_properties() # sets page.is_file etc page.gitpath = os.path.join(os_courses, page.path_with_ext) page.login_google_url = url_for('login_google', pagepath=path) page.course = page.get_course() try: if page.course.page_error: ### Unexpected (to me anyway) behavior here : ### page.course = None ### if page.course: # This throws an error! ### ... ### Apparently the peewee database code has put hooks into ### the Page object to do tricky stuff for "page.course", ### seems to drop into peewee and complain. ### I'm avoiding this by returning the Umber site course ### but with a .page_error attribute set. ### In umber.py this will turn the request into 404 not found. return page except AttributeError: # .page_error field not set; keep going. pass page.relpath = page._get_relpath() page._setup_sys() # do this before .get_access() page.access = page.get_access() # gets .access.yaml property. page._setup_user_permissions() # sets page.can['read'] etc if revision or action=='history': page._setup_revision_data() # sets page.history etc page._setup_attachments() # sets .has_attachments page._setup_work() # page.html_title = page.get_html_title() return page def get_html_title(self): """ Return string for the <title></title> html tag. 
""" try: return self.course.get_shortname() + ' : ' + self.relpath except: return self.path def get_gitpath(self, abspath=None): """ Return file path of page (or abspath file) relative to course path, including file extension if any """ # This abspath option is used in gitlocal.py and umber.py:ajax_upload ; # for attachments the page is not the upload file. _abspath = self.abspath if abspath==None else abspath return os.path.relpath(_abspath, self.course.abspath) def _get_relpath(self): """ Return path of page relative to course path, e.g. notes/home for path=demo/notes/home in course 'demo' """ # self.course must be already set. return os.path.relpath(self.path, self.course.path) def attachments_folder(self): return self.abspath.replace(self.ext, '.attachments') def _setup_attachments(self): if self.is_file and self.ext == '.md': attach_dir = self.attachments_folder() if os.path.exists(attach_dir) and os.path.isdir(attach_dir): self.attachments = self.children(abspath=attach_dir) else: self.attachments = [] self.has_attachments = len(self.attachments) > 0 else: self.attachments = [] self.has_attachments = False def _setup_work(self): """ see if this is a students/<name>/work/<number> student work page; define .is_work and .work, set up .work for html display, update """ # print(' _setup_work : relpath = {}'.format(self.relpath)) m = re.match(r'students/(\w+)/work/(\d+)(\?.*)?', self.relpath) if m: now = Time() self.is_work = True (work_username, work_nth, ignore) = m.groups() work_nth = int(work_nth) self.work_person = Person.by_username(work_username) self.work_assignment = self.course.get_assignment_by_nth(work_nth) self.work = self.work_assignment.get_work(self.work_person) duedate = Time(self.work_assignment.due) self.work_due = duedate.assigndatedetail() # ... 
but give students a extra grace period of a few hours # before marking things as "late"; # this let's me get "end of day" to something reasonable, # without changing server timezone duedate.arrow = duedate.arrow.shift(hours=due_grace_hours) if self.work.submitted: submitdate = Time(self.work.submitted) self.work_submitted = submitdate.assigndate() self.work_is_late = submitdate > duedate else: self.work_submitted = '' self.work_is_late = now > duedate self.work_grade = self.work.grade # update *_seen fields in the database # TODO : think about whether there's a better # transactional way to update the database here. if self.user_role.name == 'faculty': self.work.faculty_seen = str(now) self.work.save() if self.user.username == work_username: self.work.student_seen = str(now) self.work.save() else: self.is_work = False #self.work = None #self.work_assignment = None #self.work_person = None #self.work_due = '' #self.work_submitted = '' #self.work_is_late = False #self.work_grade = '' def _setup_sys(self): """ define .is_sys. if it is, also define .sys_template, ./sys_edit_template """ # If relpath is 'sys/assignments', then is_sys will be true, # the template will be 'umber/sys/assignments.html' # and the edit template will be 'umber/sys/edit_assignments.html', # (and the access permissions will be in the first line of the template.) self.is_sys = self.relpath[:4] == 'sys/' # -- default values for sys templates for all pages -- if self.is_sys: which = self.relpath[4:] if which == '': which = 'folder' if which not in Page.system_pages: which = 'error' self.sys_template = 'sys/' + which + '.html' if which in Page.editable_system_pages: self.sys_edit_template = 'sys/edit_' + which + '.html' else: self.sys_edit_template = 'sys/editerror.html' def get_course(self): """ return this page's course """ # And if there is no course for this page, # return the site course but also set an error within it. # # extract path pieces e.g. 
['demo', 'home'] path_parts = self.path.split('/') # build partial paths e.g. ['demo', 'demo/home'] # (stackoverflow.com/questions/13221896/python-partial-sum-of-numbers) paths = reduce(lambda x,y: x + [x[-1]+'/'+y], path_parts[1:], path_parts[0:1]) # build peewee's "where condition" to find
with all the TimeSeries tseries_dict = {} sort_indeces = {} for name, s_as_dict in series_as_dicts.items(): if "tstamp" in s_as_dict: if sort: sort_indeces[name] = np.argsort(s_as_dict["data"]) s_as_dict["data"] = s_as_dict["data"][sort_indeces[name]] tseries_dict[name] = TimeSeries.from_dict(s_as_dict) # And then ValueSeries, and put both in with the TimeSeries series_list = [] for name, s_as_dict in series_as_dicts.items(): if name in tseries_dict: series_list.append(tseries_dict[name]) elif "t_name" in s_as_dict: tseries = tseries_dict[s_as_dict["t_name"]] if s_as_dict["data"].shape == tseries.shape: # Then we assume that the time and value data have lined up # successfully! :D if sort: s_as_dict["data"] = s_as_dict["data"][ sort_indeces[tseries.name] ] vseries = ValueSeries( name=name, data=s_as_dict["data"], unit_name=s_as_dict["unit_name"], tseries=tseries, ) else: # this will be the case if vseries sharing the same tseries # are not present in the same subset of component_measurements. # In that case just append the vseries even though some tdata gets # duplicated. 
vseries = append_series( [ s for m in component_measurements for s in m.series_list if s.name == name ], sort=sort, ) series_list.append(vseries) # Finally, add this series to the dictionary representation and return the object obj_as_dict["series_list"] = series_list return cls.from_dict(obj_as_dict) @property def metadata_json_string(self): """Measurement metadata as a JSON-formatted string""" return json.dumps(self.metadata, indent=4) @property def sample_name(self): """Name of the sample on which the measurement was conducted""" if self.sample: return self.sample.name @property def series_list(self): """List of the DataSeries containing the measurement's data""" for i, s in enumerate(self._series_list): if isinstance(s, PlaceHolderObject): self._series_list[i] = s.get_object() return self._series_list @property def data_objects(self): """This is what the DB backend knows to save separately, here the series""" # TimeSeries have to go first, so that ValueSeries are saved with the right t_id! data_object_list = self.time_series for s in self.series_list: if s not in data_object_list: if s.tseries not in data_object_list: # FIXME: some tseries, likely with duplicate data, seem to not # make it into series_list data_object_list.append(s.tseries) data_object_list.append(s) return data_object_list @property def component_measurements(self): """List of the component measurements of which this measurement is a combination For a pure measurement (not a measurement set), this is itself in a list. 
""" if not self._component_measurements: return [ self, ] for i, m in enumerate(self._component_measurements): if isinstance(m, PlaceHolderObject): self._component_measurements[i] = m.get_object() return self._component_measurements @property def s_ids(self): """List of the id's of the measurement's DataSeries""" return [series.id for series in self._series_list] @property def m_ids(self): """List of the id's of a combined measurement's component measurements""" if not self._component_measurements: return None return [m.id for m in self._component_measurements] @property def series_dict(self): """Dictionary mapping the id's of the measurement's series to the DataSeries""" return {(s.id, s.backend_name): s for s in self.series_list} @property def series_names(self): """List of the names of the series in the measurement""" return set([series.name for series in self.series_list]) @property def value_names(self): """List of the names of the VSeries in the measurement's DataSeries""" return set([vseries.name for vseries in self.value_series]) @property def value_series(self): """List of the VSeries in the measurement's DataSeries""" return [ series for series in self.series_list if isinstance(series, ValueSeries) ] @property def time_names(self): """List of the names of the VSeries in the measurement's DataSeries""" return set([tseries.name for tseries in self.time_series]) @property def time_series(self): """List of the TSeries in the measurement's DataSeries. NOT timeshifted!""" return [series for series in self.series_list if isinstance(series, TimeSeries)] def __getitem__(self, item): """Return the built measurement DataSeries with its name specified by item The item is interpreted as the name of a series. VSeries names can have "-v" or "-y" as a suffix. The suffix "-t" or "-x" to a VSeries name can be used to get instead its corresponding TSeries. In any case, if there are more than one series with the name specified by item, they are appended. 
The timestamp is always shifted to the measurement's tstamp Args: item (str): The name of a DataSeries (see above) """ ss = [s for s in self.series_list if s.name == item] if len(ss) == 1: s = ss[0] elif len(ss) > 1: s = append_series(ss) elif item[-2:] in ["-t", "-x", "-v", "-y"]: ss = [s for s in self.series_list if s.name == item[:-2]] if len(ss) == 1: s = ss[0] else: s = append_series(ss) else: raise SeriesNotFoundError(f"{self} has no series called {item}") if hasattr(s, "tstamp") and not s.tstamp == self.tstamp: s = time_shifted(s, self.tstamp) return s def __setitem__(self, series_name, series): """Append `series` with name=`series_name` to `series_list` and remove others.""" if not series.name == series_name: raise SeriesNotFoundError( f"Can't set {self}[{series_name}] = {series}. Series names don't agree." ) del self[series_name] self.series_list.append(series) def __delitem__(self, series_name): """Remove all series which have `series_name` as their name from series_list""" new_series_list = [] for s in self.series_list: if not s.name == series_name: new_series_list.append(s) self._series_list = new_series_list def grab(self, item, tspan=None, include_endpoints=False): """Return a value vector with the corresponding time vector Grab is the *canonical* way to retrieve numerical time-dependent data from a measurement in ixdat. The first argument is always the name of the value to get time-resolved data for (the name of a ValueSeries). The second, optional, argument is a timespan to select the data for. Two vectors are returned: first time (t), then value (v). They are of the same length so that `v` can be plotted against `t`, integrated over `t`, interpolated via `t`, etc. `t` and `v` are returned in the units of their DataSeries. 
TODO: option to specifiy desired units Typical usage:: t, v = measurement.grab(potential, tspan=[0, 100]) Args: item (str): The name of the DataSeries to grab data for tspan (iter of float): Defines the timespan with its first and last values. Optional. By default the entire time of the measurement is included. include_endpoints (bool): Whether to add a points at t = tspan[0] and t = tspan[-1] to the data returned. This makes trapezoidal integration less dependent on the time resolution. Default is False. """ vseries = self[item] tseries = vseries.tseries v = vseries.data t = tseries.data + tseries.tstamp - self.tstamp if tspan is not None: # np arrays don't boolean well :( if include_endpoints: if t[0] < tspan[0]: # then add a point to include tspan[0] v_0 = np.interp(tspan[0], t, v) t = np.append(tspan[0], t) v = np.append(v_0, v) if tspan[-1] < t[-1]: # then add a point to include tspan[-1] v_end = np.interp(tspan[-1], t, v) t = np.append(t, tspan[-1]) v = np.append(v, v_end) mask = np.logical_and(tspan[0] <= t, t <= tspan[-1]) t, v = t[mask], v[mask] return t, v def grab_for_t(self, item, t): """Return a numpy array with the value of item interpolated to time t""" vseries = self[item] tseries = vseries.tseries v_0 = vseries.data t_0 = tseries.data + tseries.tstamp - self.tstamp v = np.interp(t, t_0, v_0) return v def integrate(self, item, tspan=None, ax=None): """Return the time integral of item in the specified timespan""" t, v = self.grab(item, tspan, include_endpoints=True) if ax: if ax == "new": ax = self.plotter.new_ax(ylabel=item) # FIXME: xlabel=self[item].tseries.name gives a problem :( ax.plot(t, v, color="k", label=item) ax.fill_between(t, v, np.zeros(t.shape), where=v > 0, color="g", alpha=0.3) ax.fill_between( t, v, np.zeros(t.shape), where=v < 0, color="g", alpha=0.1, hatch="//" ) return np.trapz(v, t) @property def data_cols(self): """Return a set of the names of all of the measurement's VSeries and TSeries""" return set([s.name for s in 
(self.value_series + self.time_series)]) @property def plotter(self): """The default plotter for Measurement is ValuePlotter.""" if not self._plotter: from .plotters import ValuePlotter # FIXME: I had to import here to avoid running into circular import issues self._plotter = ValuePlotter(measurement=self) return self._plotter @property def exporter(self): """The default exporter for Measurement is CSVExporter.""" if not self._exporter: self._exporter = CSVExporter(measurement=self) return self._exporter def export(self, *args, exporter=None, **kwargs): """Export the measurement using its exporter (see its Exporter for details)""" if exporter: return exporter.export_measurement(self, *args, **kwargs) return self.exporter.export(*args, **kwargs) def get_original_m_id_of_series(self, series): """Return the id(s) of component measurements to which `series` belongs.""" m_id_list = [] for m in self.component_measurements: if series.id in m.s_ids: m_id_list.append(m.id) if len(m_id_list) == 1: return m_id_list[0] return m_id_list def cut(self, tspan, t_zero=None): """Return a new measurement with the data in the given time interval Args: tspan
0, 2)), Perm((0, 1, 4, 2, 3))] ) assert InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((0, 1, 2)), Perm((0, 2, 1)), Perm((2, 0, 1)), Perm((2, 1, 0)), Perm((1, 3, 2, 0)), Perm((1, 4, 3, 2, 0)), Perm((1, 2, 4, 0, 3, 6, 5)), Perm((1, 7, 0, 3, 4, 5, 6, 2)), ] ) assert InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((2, 0, 1)), Perm((1, 2, 3, 0)), Perm((3, 0, 2, 1, 4)), Perm((4, 1, 6, 3, 0, 5, 2)), ] ) assert InsertionEncodablePerms.is_insertion_encodable_maximum( [Perm((1, 0)), Perm((0, 2, 1)), Perm((1, 2, 0)), Perm((2, 1, 0))] ) assert InsertionEncodablePerms.is_insertion_encodable_maximum( [Perm(()), Perm((0, 1, 3, 2))] ) assert InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((0, 2, 1)), Perm((0, 1, 2, 3)), Perm((0, 3, 5, 1, 4, 2)), Perm((3, 5, 2, 6, 0, 1, 4, 7)), ] ) assert InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((2, 0, 1, 3)), Perm((3, 1, 2, 0)), Perm((3, 2, 1, 0)), Perm((2, 3, 4, 1, 0)), Perm((4, 2, 0, 3, 5, 1)), Perm((6, 5, 4, 3, 7, 1, 2, 0)), ] ) assert InsertionEncodablePerms.is_insertion_encodable_maximum( [Perm((0, 1, 2, 3)), Perm((3, 2, 5, 1, 4, 0))] ) assert InsertionEncodablePerms.is_insertion_encodable_maximum( [Perm((0, 1)), Perm((0, 1, 2)), Perm((2, 1, 4, 0, 3)), Perm((3, 2, 0, 1, 4))] ) assert InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((0, 1, 2)), Perm((0, 2, 1)), Perm((0, 2, 1, 4, 3)), Perm((2, 1, 4, 3, 0)), Perm((2, 4, 3, 5, 1, 0)), ] ) assert InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((0, 1, 2)), Perm((2, 0, 1)), Perm((0, 4, 2, 1, 3)), Perm((1, 0, 3, 4, 2)), Perm((3, 2, 4, 0, 1)), Perm((1, 4, 5, 3, 0, 2)), Perm((0, 4, 2, 5, 1, 3, 6)), ] ) assert InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((0, 2, 1)), Perm((0, 3, 2, 1)), Perm((1, 2, 3, 0)), Perm((1, 2, 3, 4, 0)), Perm((1, 4, 0, 3, 2)), Perm((4, 0, 1, 5, 2, 3)), Perm((3, 6, 1, 2, 0, 5, 4)), Perm((4, 0, 5, 1, 6, 2, 3)), ] ) assert 
InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((0, 2, 1)), Perm((1, 2, 0, 3)), Perm((1, 3, 4, 0, 2)), Perm((1, 2, 5, 4, 3, 0)), Perm((5, 0, 2, 1, 4, 3)), Perm((4, 5, 2, 6, 3, 1, 0)), Perm((5, 0, 1, 4, 6, 3, 2)), Perm((5, 0, 3, 1, 6, 2, 4)), ] ) assert InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((0, 2, 1)), Perm((1, 2, 0)), Perm((3, 1, 2, 0)), Perm((3, 2, 4, 0, 1)), Perm((2, 0, 5, 1, 3, 4)), Perm((3, 5, 0, 1, 4, 2, 7, 6)), Perm((4, 5, 2, 0, 3, 6, 1, 7)), Perm((6, 3, 2, 0, 1, 7, 4, 5)), ] ) assert InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((0, 3, 2, 1)), Perm((1, 2, 3, 0)), Perm((0, 2, 1, 4, 3)), Perm((6, 1, 0, 5, 3, 4, 2)), Perm((6, 0, 2, 5, 1, 4, 7, 3)), ] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [Perm((3, 0, 5, 2, 4, 1))] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((1, 0, 2)), Perm((1, 3, 2, 0)), Perm((1, 5, 4, 2, 0, 3)), Perm((4, 2, 0, 3, 5, 1)), Perm((4, 3, 2, 0, 5, 1)), Perm((5, 1, 2, 4, 3, 0, 6)), ] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((2, 0, 1)), Perm((1, 2, 0, 4, 3)), Perm((3, 0, 2, 1, 4, 5)), Perm((5, 3, 2, 0, 1, 6, 4)), ] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [Perm((0, 4, 3, 1, 2))] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [Perm((0, 2, 1)), Perm((0, 4, 2, 3, 1))] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [Perm((2, 3, 1, 5, 0, 4)), Perm((6, 2, 4, 1, 3, 7, 5, 0))] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((0, 4, 2, 1, 3)), Perm((1, 4, 0, 2, 3)), Perm((3, 1, 2, 5, 6, 0, 4)), Perm((0, 4, 6, 5, 7, 1, 2, 3)), ] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [Perm((1, 0, 3, 4, 2))] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [Perm((2, 0, 1, 3))] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [Perm((1, 3, 2, 4, 0))] ) assert 
not InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((0, 2, 1, 3)), Perm((0, 2, 3, 1)), Perm((0, 5, 3, 1, 2, 6, 4)), Perm((2, 4, 3, 0, 1, 6, 7, 5)), Perm((2, 7, 1, 4, 5, 6, 0, 3)), ] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((3, 0, 2, 1)), Perm((3, 1, 2, 0)), Perm((3, 1, 0, 4, 2)), Perm((4, 0, 2, 3, 1)), Perm((4, 0, 3, 1, 2)), ] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [Perm((1, 5, 6, 4, 2, 0, 3))] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((3, 2, 1, 0, 4)), Perm((4, 3, 2, 0, 5, 1)), Perm((5, 4, 2, 3, 0, 1)), Perm((0, 3, 2, 1, 4, 6, 5)), Perm((2, 3, 0, 5, 1, 6, 4)), Perm((3, 6, 2, 4, 1, 0, 5)), Perm((0, 5, 3, 4, 2, 6, 7, 1)), Perm((0, 6, 4, 3, 7, 1, 5, 2)), ] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [Perm((1, 2, 0)), Perm((4, 2, 5, 0, 1, 3)), Perm((6, 3, 5, 1, 7, 4, 0, 2))] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [Perm((3, 0, 2, 1)), Perm((4, 2, 1, 3, 0))] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [Perm((3, 2, 0, 1)), Perm((6, 1, 3, 2, 4, 0, 5))] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((0, 2, 3, 1)), Perm((0, 3, 2, 1)), Perm((3, 0, 1, 2)), Perm((4, 3, 2, 0, 5, 1)), Perm((1, 0, 6, 4, 3, 5, 2)), Perm((3, 1, 6, 2, 5, 0, 4)), Perm((5, 2, 6, 7, 0, 3, 1, 4)), Perm((7, 1, 6, 2, 5, 0, 4, 3)), ] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [Perm((0, 2, 1)), Perm((3, 1, 2, 0))] ) assert not InsertionEncodablePerms.is_insertion_encodable_maximum( [ Perm((0, 2, 1)), Perm((4, 0, 2, 1, 3)), Perm((2, 5, 4, 1, 3, 0)), Perm((3, 5, 4, 0, 2, 1)), Perm((1, 0, 4, 2, 3, 6, 5)), Perm((6, 3, 2, 0, 5, 1, 4)), Perm((4, 0, 1, 2, 3, 5, 6, 7)), ] ) def test_is_insertion_encodable(): assert InsertionEncodablePerms.is_insertion_encodable( [Perm(()), Perm((0, 1)), Perm((0, 1, 2))] ) assert InsertionEncodablePerms.is_insertion_encodable( [ Perm((0, 
3, 2, 1)), Perm((0, 2, 3, 4, 1)), Perm((3, 0, 1, 2, 4)), Perm((4, 2, 1, 0, 3, 5)), Perm((0, 2, 1, 4, 6, 3, 5)), Perm((6, 2, 1, 0, 5, 4, 3)), ] ) assert InsertionEncodablePerms.is_insertion_encodable( [Perm((0, 1)), Perm((1, 0)), Perm((1, 0, 3, 2))] ) assert InsertionEncodablePerms.is_insertion_encodable([Perm(()), Perm((1, 0))]) assert InsertionEncodablePerms.is_insertion_encodable( [ Perm((0, 1, 2)), Perm((2, 0, 1)), Perm((0, 2, 4, 3, 1)), Perm((1, 2, 4, 0, 3)), Perm((3, 2, 4, 1, 5, 0)), Perm((2, 0, 3, 6, 4, 1, 5)), ] ) assert InsertionEncodablePerms.is_insertion_encodable( [Perm((1, 0)), Perm((0, 1, 2)), Perm((0, 3, 1, 2)), Perm((1, 3, 2, 0))] ) assert InsertionEncodablePerms.is_insertion_encodable( [ Perm((2, 1, 0)), Perm((0, 3, 1, 2)), Perm((0, 3, 2, 1)), Perm((3, 2, 1, 0)), Perm((2, 1, 3, 5, 0, 4)), Perm((3, 1, 5, 2, 4, 0)), Perm((5, 1, 4, 3, 0, 2, 6)), ] ) assert InsertionEncodablePerms.is_insertion_encodable( [Perm(()), Perm((1, 0)), Perm((2, 0, 1)), Perm((0, 1, 2, 4, 3))] ) assert InsertionEncodablePerms.is_insertion_encodable( [Perm(()), Perm((0,)), Perm((0, 1)), Perm((1, 0)), Perm((0, 3, 1, 2, 4))] ) assert InsertionEncodablePerms.is_insertion_encodable( [ Perm((0, 2, 1)), Perm((3, 2, 0, 1)), Perm((2, 1, 0, 3, 4)), Perm((3, 4, 2, 0, 5, 1)), Perm((4, 2, 7, 5, 0, 6, 3, 1)), ] ) assert InsertionEncodablePerms.is_insertion_encodable([Perm(()), Perm((2, 1, 0))]) assert InsertionEncodablePerms.is_insertion_encodable( [ Perm((2, 0, 1)), Perm((3, 0, 1, 2)), Perm((1, 0, 2, 3, 4)), Perm((2, 3, 0, 1, 4)), Perm((3, 4, 1, 0, 2)), Perm((4, 3, 1, 6, 0, 7, 2, 5)), ] ) assert InsertionEncodablePerms.is_insertion_encodable( [ Perm((0, 1, 2)), Perm((1, 2, 0)), Perm((0, 2, 1, 3)), Perm((0, 3, 2, 1)), Perm((3, 0, 1, 2)), Perm((1, 0, 2, 4, 5, 6, 3)), Perm((1, 3, 6, 7, 2, 5, 4, 0)), ] ) assert InsertionEncodablePerms.is_insertion_encodable( [Perm(()), Perm((0,)), Perm((1, 0)), Perm((4, 0, 1, 2, 3))] ) assert InsertionEncodablePerms.is_insertion_encodable( [ Perm((0, 1, 2)), 
Perm((2, 1, 0)), Perm((1, 2, 0, 3, 4)), Perm((3, 1, 0, 4, 2)), Perm((0, 5, 1, 4, 3, 2, 6)), Perm((2, 5, 4, 3, 6, 0, 1)), ] ) assert InsertionEncodablePerms.is_insertion_encodable( [ Perm((1, 0, 2)), Perm((2, 3, 1, 0)), Perm((0, 1, 4, 3, 2)), Perm((0, 4, 3, 1, 2)), Perm((4, 0, 1, 3, 2)), Perm((0, 3, 2, 1, 5, 4)), Perm((2, 4, 0, 3, 5, 1)), ] ) assert
# SPDX-License-Identifier: Apache-2.0
# Copyright (C) 2020 ifm electronic gmbh
#
# THE PROGRAM IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND.
#
"""
Tests that exceptions thrown by filters are caught and reported as ERROR log
records.  The matrix covered is:

    implementation : python / C (the C variants are skipped when the C
                     extension is not built)
    thread         : "main", "thread-source", "compute"
    where to throw : "port", "init", "start", "stop", "deinit" (or "nowhere")
"""
import json
import logging
from pathlib import Path
import pytest
import pytestqt
from PySide2.QtCore import QCoreApplication, QTimer
from nexxT.interface import FilterState, Services
from nexxT.core.ConfigFiles import ConfigFileLoader
from nexxT.core.Application import Application
from nexxT.core.Configuration import Configuration
import nexxT


def setup():
    """pytest module-level setup: make sure a QCoreApplication exists."""
    global app
    app = QCoreApplication.instance()
    if app is None:
        app = QCoreApplication()


def exception_setup(python, thread, where, activeTime_s):
    """
    Run one instrumented nexxT application and collect its log records.

    :param python:       if False (and the C impl is available), the filter
                         under test is loaded from the binary test plugin
    :param thread:       thread assignment of the throwing filter node
    :param where:        value of the filter's "whereToThrow" property
    :param activeTime_s: how long the application stays in the ACTIVE state
    :return:             list of logging.LogRecord collected during the run
    """
    logging.getLogger(__name__).info("------------------------------------------------------")
    logging.getLogger(__name__).info("Starting exception_setup %d %s %s %f",
                                     python, thread, where, activeTime_s)
    from nexxT.services.ConsoleLogger import ConsoleLogger
    logger = ConsoleLogger()
    Services.addService("Logging", logger)

    class LogCollector(logging.StreamHandler):
        # captures every emitted record so the tests can inspect them
        def __init__(self):
            super().__init__()
            self.logs = []

        def emit(self, record):
            self.logs.append(record)

    # avoid warning flood about service profiling not found
    Services.addService("Profiling", None)
    collector = LogCollector()
    logging.getLogger().addHandler(collector)
    try:
        t = QTimer()
        t.setSingleShot(True)
        # watchdog timer: fires if the test case hangs
        t2 = QTimer()
        t2.start((activeTime_s + 3)*1000)
        try:
            # load the template configuration, patch in the requested
            # implementation / thread / throw location and save it to a
            # temporary config file
            test_json = Path(__file__).parent / "test_except_constr.json"
            with test_json.open("r", encoding='utf-8') as fp:
                cfg = json.load(fp)
            if nexxT.useCImpl and not python:
                cfg["composite_filters"][0]["nodes"][2]["library"] = \
                    "binary://../binary/${NEXXT_PLATFORM}/${NEXXT_VARIANT}/test_plugins"
            cfg["composite_filters"][0]["nodes"][2]["thread"] = thread
            cfg["composite_filters"][0]["nodes"][2]["properties"]["whereToThrow"] = where
            mod_json = Path(__file__).parent / "test_except_constr_tmp.json"
            with mod_json.open("w", encoding="utf-8") as fp:
                json.dump(cfg, fp)
            config = Configuration()
            ConfigFileLoader.load(config, mod_json)
            config.activate("testApp")
            app.processEvents()
            aa = Application.activeApplication
            # two-phase shutdown: first timeout stops/closes/deinits the
            # application, the second one leaves the Qt event loop
            init = True

            def timeout():
                nonlocal init
                if init:
                    init = False
                    aa.stop()
                    aa.close()
                    aa.deinit()
                else:
                    app.exit(0)

            def timeout2():
                # watchdog hit: force the same shutdown but exit with code 1
                nonlocal init
                print("Application timeout hit!")
                if init:
                    init = False
                    aa.stop()
                    aa.close()
                    aa.deinit()
                else:
                    print("application exit!")
                    app.exit(1)

            t2.timeout.connect(timeout2)
            t.timeout.connect(timeout)

            def state_changed(state):
                # schedule the regular shutdown once the app became ACTIVE,
                # and the final event-loop exit once it is CONSTRUCTED again
                if state == FilterState.ACTIVE:
                    t.setSingleShot(True)
                    t.start(activeTime_s*1000)
                elif not init and state == FilterState.CONSTRUCTED:
                    t.start(1000)

            aa.stateChanged.connect(state_changed)
            aa.init()
            aa.open()
            aa.start()
            app.exec_()
        finally:
            del t
            del t2
    finally:
        logging.getLogger().removeHandler(collector)
        Services.removeAll()
    return collector.logs


# marker for the tests exercising the C filter implementation.
# NOTE(review): the original reason string said "python only test", which
# contradicts the condition (it skips when the C impl is NOT available).
c_impl_only = pytest.mark.skipif(not nexxT.useCImpl,
                                 reason="requires nexxT C implementation")

# expected error messages
MSG_PORT_PY = "Uncaught exception"
MSG_PORT_C = "Unexpected exception during onPortDataChanged from filter filter: exception in port"


def _op_msg(operation):
    """Error message emitted when a state-machine operation of the filter fails."""
    return "Exception while executing operation %s of filter filter" % operation


def _assert_error_logs(logs, expected, min_count=1, max_count=None):
    """
    Check the ERROR-level records collected by :func:`exception_setup`.

    :param logs:      list of logging.LogRecord
    :param expected:  the message every ERROR record must carry
    :param min_count: minimal number of expected ERROR records
    :param max_count: maximal number (None: unbounded)
    """
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) >= min_count
    if max_count is not None:
        assert len(errors) <= max_count
    assert all(e == expected for e in errors)


@pytest.mark.qt_no_exception_capture
def test_exception_python_main_none():
    # smoke test: no exception is thrown anywhere; just make sure the
    # application runs through (no assertions on the logs by design)
    logs = exception_setup(True, "main", "nowhere", 2)

# ---------------
# port exceptions
# ---------------

@pytest.mark.qt_no_exception_capture
def test_exception_python_main_port():
    _assert_error_logs(exception_setup(True, "main", "port", 2), MSG_PORT_PY)

@pytest.mark.qt_no_exception_capture
def test_exception_python_source_port():
    _assert_error_logs(exception_setup(True, "thread-source", "port", 2), MSG_PORT_PY)

@pytest.mark.qt_no_exception_capture
def test_exception_python_compute_port():
    _assert_error_logs(exception_setup(True, "compute", "port", 2), MSG_PORT_PY)

@pytest.mark.qt_no_exception_capture
@c_impl_only
def test_exception_c_main_port():
    _assert_error_logs(exception_setup(False, "main", "port", 2), MSG_PORT_C)

@pytest.mark.qt_no_exception_capture
@c_impl_only
def test_exception_c_source_port():
    _assert_error_logs(exception_setup(False, "thread-source", "port", 2), MSG_PORT_C)

@pytest.mark.qt_no_exception_capture
@c_impl_only
def test_exception_c_compute_port():
    _assert_error_logs(exception_setup(False, "compute", "port", 2), MSG_PORT_C)

# ---------------
# init exceptions
# ---------------

@pytest.mark.qt_no_exception_capture
def test_exception_python_main_init():
    _assert_error_logs(exception_setup(True, "main", "init", 2),
                       _op_msg("INITIALIZING"), min_count=1, max_count=3)

@pytest.mark.qt_no_exception_capture
def test_exception_python_source_init():
    _assert_error_logs(exception_setup(True, "thread-source", "init", 2),
                       _op_msg("INITIALIZING"), min_count=1, max_count=3)

@pytest.mark.qt_no_exception_capture
def test_exception_python_compute_init():
    _assert_error_logs(exception_setup(True, "compute", "init", 2),
                       _op_msg("INITIALIZING"), min_count=1, max_count=3)

@pytest.mark.qt_no_exception_capture
@c_impl_only
def test_exception_c_main_init():
    _assert_error_logs(exception_setup(False, "main", "init", 2),
                       _op_msg("INITIALIZING"), min_count=1, max_count=3)

@pytest.mark.qt_no_exception_capture
@c_impl_only
def test_exception_c_source_init():
    _assert_error_logs(exception_setup(False, "thread-source", "init", 2),
                       _op_msg("INITIALIZING"), min_count=1, max_count=3)

@pytest.mark.qt_no_exception_capture
@c_impl_only
def test_exception_c_compute_init():
    _assert_error_logs(exception_setup(False, "compute", "init", 2),
                       _op_msg("INITIALIZING"), min_count=1, max_count=3)

# ---------------
# start exceptions
# ---------------

@pytest.mark.qt_no_exception_capture
def test_exception_python_main_start():
    _assert_error_logs(exception_setup(True, "main", "start", 2),
                       _op_msg("STARTING"), min_count=1, max_count=1)

@pytest.mark.qt_no_exception_capture
def test_exception_python_source_start():
    _assert_error_logs(exception_setup(True, "thread-source", "start", 2),
                       _op_msg("STARTING"), min_count=1, max_count=1)

@pytest.mark.qt_no_exception_capture
def test_exception_python_compute_start():
    _assert_error_logs(exception_setup(True, "compute", "start", 2),
                       _op_msg("STARTING"), min_count=1, max_count=1)

@pytest.mark.qt_no_exception_capture
@c_impl_only
def test_exception_c_main_start():
    _assert_error_logs(exception_setup(False, "main", "start", 2),
                       _op_msg("STARTING"), min_count=1, max_count=1)

@pytest.mark.qt_no_exception_capture
@c_impl_only
def test_exception_c_source_start():
    _assert_error_logs(exception_setup(False, "thread-source", "start", 2),
                       _op_msg("STARTING"), min_count=1, max_count=1)

@pytest.mark.qt_no_exception_capture
@c_impl_only
def test_exception_c_compute_start():
    _assert_error_logs(exception_setup(False, "compute", "start", 2),
                       _op_msg("STARTING"), min_count=1, max_count=1)

# ---------------
# stop exceptions
# ---------------

@pytest.mark.qt_no_exception_capture
def test_exception_python_main_stop():
    _assert_error_logs(exception_setup(True, "main", "stop", 2),
                       _op_msg("STOPPING"), min_count=1, max_count=1)

@pytest.mark.qt_no_exception_capture
def test_exception_python_source_stop():
    _assert_error_logs(exception_setup(True, "thread-source", "stop", 2),
                       _op_msg("STOPPING"), min_count=1, max_count=1)

@pytest.mark.qt_no_exception_capture
def test_exception_python_compute_stop():
    _assert_error_logs(exception_setup(True, "compute", "stop", 2),
                       _op_msg("STOPPING"), min_count=1, max_count=1)

@pytest.mark.qt_no_exception_capture
@c_impl_only
def test_exception_c_main_stop():
    _assert_error_logs(exception_setup(False, "main", "stop", 2),
                       _op_msg("STOPPING"), min_count=1, max_count=1)

@pytest.mark.qt_no_exception_capture
@c_impl_only
def test_exception_c_source_stop():
    _assert_error_logs(exception_setup(False, "thread-source", "stop", 2),
                       _op_msg("STOPPING"), min_count=1, max_count=1)

@pytest.mark.qt_no_exception_capture
@c_impl_only
def test_exception_c_compute_stop():
    _assert_error_logs(exception_setup(False, "compute", "stop", 2),
                       _op_msg("STOPPING"), min_count=1, max_count=1)

# ---------------
# deinit exceptions
# ---------------

@pytest.mark.qt_no_exception_capture
def test_exception_python_main_deinit():
    _assert_error_logs(exception_setup(True, "main", "deinit", 2),
                       _op_msg("DEINITIALIZING"), min_count=1, max_count=3)
@pytest.mark.qt_no_exception_capture def test_exception_python_source_deinit(): logs = exception_setup(True, "thread-source", "deinit", 2) errors = [r.message for r in logs if r.levelno >= logging.ERROR] assert 1 <= len(errors) <= 3 assert all(e == "Exception while executing operation DEINITIALIZING of
+ '\n' + \ '#define padding_const2 1' + '\n' + \ '#define pooling_radius_const2 1' + '\n' + \ '#define pooling_stride_const2 1' + '\n' + \ '#define Y_const2 1' + '\n' + \ '#define X_const2 1' + '\n' \ '#define output_H_const2 1' + '\n' \ '#define output_W_const2 1' + '\n' \ '#define pooled_H_const2 1' + '\n' \ '#define pooled_W_const2 1' + '\n' contents += '#define N_BLOCK_GRAD2 1' + '\n' + \ '#define C_BLOCK_GRAD2 1' + '\n' + \ '#define H_ARG_BLOCK_GRAD2 1' + '\n' + \ '#define W_ARG_BLOCK_GRAD2 1' + '\n' + \ '#define Y_BLOCK_GRAD2 1' + '\n\n' contents += '\n\n' + '#define C_const3 1\n' + \ '#define H_const3 1' + '\n' + \ '#define W_const3 1' + '\n' + \ '#define K_const3 1' + '\n' + \ '#define stride_const3 1' + '\n' + \ '#define padding_const3 1' + '\n' + \ '#define pooling_radius_const3 1' + '\n' + \ '#define pooling_stride_const3 1' + '\n' + \ '#define Y_const3 1' + '\n' + \ '#define X_const3 1' + '\n' \ '#define output_H_const3 1' + '\n' \ '#define output_W_const3 1' + '\n' \ '#define pooled_H_const3 1' + '\n' \ '#define pooled_W_const3 1' + '\n' contents += '#define N_BLOCK_GRAD3 1' + '\n' + \ '#define C_BLOCK_GRAD3 1' + '\n' + \ '#define H_ARG_BLOCK_GRAD3 1' + '\n' + \ '#define W_ARG_BLOCK_GRAD3 1' + '\n' + \ '#define Y_BLOCK_GRAD3 1' + '\n\n' contents += '\n\n' + '#define C_const4 1\n' + \ '#define H_const4 1' + '\n' + \ '#define W_const4 1' + '\n' + \ '#define K_const4 1' + '\n' + \ '#define stride_const4 1' + '\n' + \ '#define padding_const4 1' + '\n' + \ '#define pooling_radius_const4 1' + '\n' + \ '#define pooling_stride_const4 1' + '\n' + \ '#define Y_const4 1' + '\n' + \ '#define X_const4 1' + '\n' \ '#define output_H_const4 1' + '\n' \ '#define output_W_const4 1' + '\n' \ '#define pooled_H_const4 1' + '\n' \ '#define pooled_W_const4 1' + '\n' contents += '#define N_BLOCK_GRAD4 1' + '\n' + \ '#define C_BLOCK_GRAD4 1' + '\n' + \ '#define H_ARG_BLOCK_GRAD4 1' + '\n' + \ '#define W_ARG_BLOCK_GRAD4 1' + '\n' + \ '#define Y_BLOCK_GRAD4 1' + '\n\n' contents 
+= '\n\n' + '#define C_const5 1\n' + \ '#define H_const5 1' + '\n' + \ '#define W_const5 1' + '\n' + \ '#define K_const5 1' + '\n' + \ '#define stride_const5 1' + '\n' + \ '#define padding_const5 1' + '\n' + \ '#define pooling_radius_const5 1' + '\n' + \ '#define pooling_stride_const5 1' + '\n' + \ '#define Y_const5 1' + '\n' + \ '#define X_const5 1' + '\n' \ '#define output_H_const5 1' + '\n' \ '#define output_W_const5 1' + '\n' \ '#define pooled_H_const5 1' + '\n' \ '#define pooled_W_const5 1' + '\n' contents += '#define N_BLOCK_GRAD5 1' + '\n' + \ '#define C_BLOCK_GRAD5 1' + '\n' + \ '#define H_ARG_BLOCK_GRAD5 1' + '\n' + \ '#define W_ARG_BLOCK_GRAD5 1' + '\n' + \ '#define Y_BLOCK_GRAD5 1' + '\n\n' contents += '\n\n' + '#define C_const6 1\n' + \ '#define H_const6 1' + '\n' + \ '#define W_const6 1' + '\n' + \ '#define K_const6 1' + '\n' + \ '#define stride_const6 1' + '\n' + \ '#define padding_const6 1' + '\n' + \ '#define pooling_radius_const6 1' + '\n' + \ '#define pooling_stride_const6 1' + '\n' + \ '#define Y_const6 1' + '\n' + \ '#define X_const6 1' + '\n' \ '#define output_H_const6 1' + '\n' \ '#define output_W_const6 1' + '\n' \ '#define pooled_H_const6 1' + '\n' \ '#define pooled_W_const6 1' + '\n' contents += '#define N_BLOCK_GRAD6 1' + '\n' + \ '#define C_BLOCK_GRAD6 1' + '\n' + \ '#define H_ARG_BLOCK_GRAD6 1' + '\n' + \ '#define W_ARG_BLOCK_GRAD6 1' + '\n' + \ '#define Y_BLOCK_GRAD6 1' + '\n\n' contents += '\n\n' + '#define C_const7 1\n' + \ '#define H_const7 1' + '\n' + \ '#define W_const7 1' + '\n' + \ '#define K_const7 1' + '\n' + \ '#define stride_const7 1' + '\n' + \ '#define padding_const7 1' + '\n' + \ '#define pooling_radius_const7 1' + '\n' + \ '#define pooling_stride_const7 1' + '\n' + \ '#define Y_const7 1' + '\n' + \ '#define X_const7 1' + '\n' \ '#define output_H_const7 1' + '\n' \ '#define output_W_const7 1' + '\n' \ '#define pooled_H_const7 1' + '\n' \ '#define pooled_W_const7 1' + '\n' contents += '#define N_BLOCK_GRAD7 1' + '\n' + \ 
'#define C_BLOCK_GRAD7 1' + '\n' + \ '#define H_ARG_BLOCK_GRAD7 1' + '\n' + \ '#define W_ARG_BLOCK_GRAD7 1' + '\n' + \ '#define Y_BLOCK_GRAD7 1' + '\n\n' contents += '\n\n' + '#define C_const8 1\n' + \ '#define H_const8 1' + '\n' + \ '#define W_const8 1' + '\n' + \ '#define K_const8 1' + '\n' + \ '#define stride_const8 1' + '\n' + \ '#define padding_const8 1' + '\n' + \ '#define pooling_radius_const8 1' + '\n' + \ '#define pooling_stride_const8 1' + '\n' + \ '#define Y_const8 1' + '\n' + \ '#define X_const8 1' + '\n' \ '#define output_H_const8 1' + '\n' \ '#define output_W_const8 1' + '\n' \ '#define pooled_H_const8 1' + '\n' \ '#define pooled_W_const8 1' + '\n' contents += '#define N_BLOCK_GRAD8 1' + '\n' + \ '#define C_BLOCK_GRAD8 1' + '\n' + \ '#define H_ARG_BLOCK_GRAD8 1' + '\n' + \ '#define W_ARG_BLOCK_GRAD8 1' + '\n' + \ '#define Y_BLOCK_GRAD8 1' + '\n\n' contents += '\n\n' + '#define C_const9 1\n' + \ '#define H_const9 1' + '\n' + \ '#define W_const9 1' + '\n' + \ '#define K_const9 1' + '\n' + \ '#define stride_const9 1' + '\n' + \ '#define padding_const9 1' + '\n' + \ '#define pooling_radius_const9 1' + '\n' + \ '#define pooling_stride_const9 1' + '\n' + \ '#define Y_const9 1' + '\n' + \ '#define X_const9 1' + '\n' \ '#define output_H_const9 1' + '\n' \ '#define output_W_const9 1' + '\n' \ '#define pooled_H_const9 1' + '\n' \ '#define pooled_W_const9 1' + '\n' contents += '#define N_BLOCK_GRAD9 1' + '\n' + \ '#define C_BLOCK_GRAD9 1' + '\n' + \ '#define H_ARG_BLOCK_GRAD9 1' + '\n' + \ '#define W_ARG_BLOCK_GRAD9 1' + '\n' + \ '#define Y_BLOCK_GRAD9 1' + '\n\n' contents += '\n\n' + '#define C_const10 1\n' + \ '#define H_const10 1' + '\n' + \ '#define W_const10 1' + '\n' + \ '#define K_const10 1' + '\n' + \ '#define stride_const10 1' + '\n' + \ '#define padding_const10 1' + '\n' + \ '#define pooling_radius_const10 1' + '\n' + \ '#define pooling_stride_const10 1' + '\n' + \ '#define Y_const10 1' + '\n' + \ '#define X_const10 1' + '\n' \ '#define 
output_H_const10 1' + '\n' \ '#define output_W_const10 1' + '\n' \ '#define pooled_H_const10 1' + '\n' \ '#define pooled_W_const10 1' + '\n' contents += '#define N_BLOCK_GRAD10 1' + '\n' + \ '#define C_BLOCK_GRAD10 1' + '\n' + \ '#define H_ARG_BLOCK_GRAD10 1' + '\n' + \ '#define W_ARG_BLOCK_GRAD10 1' + '\n' + \ '#define Y_BLOCK_GRAD10 1' + '\n\n' contents += '\n\n' + '#define C_constL2 ' + `c` + '\n' + \ '#define H_constL2 ' + `H` + '\n' + \ '#define W_constL2 ' + `W` + '\n' + \ '#define K_constL2 ' + `K` + '\n' + \ '#define stride_constL2 ' + `stride` + '\n' + \ '#define padding_constL2 ' + `padding` + '\n' + \ '#define Y_constL2 ' + `Y` + '\n' + \ '#define X_constL2 ' + `X` + '\n' \ '#define output_H_constL2 ' + `output_H` + '\n' \ '#define output_W_constL2 ' + `output_W` + '\n' print contents macros_file.write(contents) macros_file.close() print 'Recompiling MICMat for the given set of hyperparameters at ' + SPECIFIC_MICMAT_PATH + '. \n' os.environ['SPECIFIC_MICMAT_PATH'] = SPECIFIC_MICMAT_PATH os.environ['MICMAT_PATH'] = MICMAT_PATH path.append(SPECIFIC_MICMAT_PATH) subprocess.call(MICMAT_PATH + 'micmat_build_specific.sh', cwd = MICMAT_PATH, env = os.environ) def test_fft(scratch): H, W = 15, 15 band_H, band_W = 6, 6 inputs = C.MICMat((1, 1, band_H, band_W)).offload_mic().fill_randn(stream, 0., 1.) outputs = C.MICMat((1, 1, H, W)).offload_mic().fill_zeros()
= model.latent_model.gc.get_proba().cpu().numpy() fig = plot_01_matrix(gc_prob, title="GC mask", row_label="Z", col_label="C", row_to_mark=indices) logger.log_figure("GC_mask", fig, step=engine.state.iteration) plt.close(fig) ## ---- Logging ---- ## @trainer.on(Events.ITERATION_COMPLETED(every=opt.fast_log_period)) def fast_log(engine): metrics = deepcopy(engine.state.metrics) logger.log_metrics(step=engine.state.iteration, metrics=metrics) @trainer.on(Events.ITERATION_COMPLETED(every=opt.eval_period)) def evaluate_and_log(engine): with torch.no_grad(): evaluator.run(valid_loader, 1) # make sure no keys intersect before concatenating assert len(set(engine.state.metrics.keys()) & set(evaluator.state.metrics["all"].keys())) == 0 metrics = deepcopy(engine.state.metrics) metrics.update(evaluator.state.metrics["all"]) # best metrics g_reg = opt.g_reg_coeff * g_scaling * model.latent_model.g_regularizer().item() gc_reg = opt.gc_reg_coeff * gc_scaling * model.latent_model.gc_regularizer().item() total_reg = g_reg + gc_reg best_score = best_checkpoint_handler._saved[-1].priority if opt.best_criterion == "loss": metrics["best_loss_valid"] = - best_score metrics["best_nll_valid"] = - best_score - total_reg if opt.best_criterion == "nll": metrics["best_loss_valid"] = - best_score + total_reg metrics["best_nll_valid"] = - best_score # LR scheduling if opt.scheduler == "reduce_on_plateau": scheduler.step(- best_score) metrics["last_lr"] = scheduler._last_lr[0] # timers metrics["time_avg_iter"] = timer_avg_iter.value() timer_avg_iter.reset() metrics["time_eval"] = timer_eval.value() metrics["time_total"] = timer_total.value() logger.log_metrics(step=engine.state.iteration, metrics=metrics) # evaluating representation def evaluate_disentanglement(model, metrics, postfix=""): metrics["linear_score" + postfix], _ = linear_regression_metric(model, test_loader, device, opt=opt) metrics["mean_corr_coef" + postfix], _, assignments, z, z_hat = mean_corr_coef(model, test_loader, device, 
opt=opt) return assignments, z, z_hat @trainer.on(Events.COMPLETED) def final_evaluate_and_log(engine): """ Post-fix == _best : Best model before thresholding post-fix == _final: Best model after thresholding and further training. """ print("final_evaluate_and_log...") with torch.no_grad(): evaluator.run(valid_loader, 1) evaluator_test.run(test_loader, 1) # merge metrics of trainer, evaluator and evaluator_test assert len(set(engine.state.metrics.keys()) & set(evaluator.state.metrics["all"].keys()) & set(evaluator_test.state.metrics["all"].keys())) == 0 metrics = deepcopy(engine.state.metrics) metrics.update(evaluator.state.metrics["all"]) metrics.update(evaluator_test.state.metrics["all"]) # add _final postfix for key in list(metrics.keys()): metrics[key + "_final"] = metrics.pop(key) # best metrics g_reg = opt.g_reg_coeff * g_scaling * model.latent_model.g_regularizer().item() gc_reg = opt.gc_reg_coeff * gc_scaling * model.latent_model.gc_regularizer().item() total_reg = g_reg + gc_reg best_score = best_checkpoint_handler._saved[-1].priority if opt.best_criterion == "loss": metrics["best_loss_valid"] = - best_score metrics["best_nll_valid"] = - best_score - total_reg if opt.best_criterion == "nll": metrics["best_loss_valid"] = - best_score + total_reg metrics["best_nll_valid"] = - best_score # timers metrics["time_avg_iter"] = timer_avg_iter.value() timer_avg_iter.reset() metrics["time_eval"] = timer_eval.value() metrics["time_total"] = timer_total.value() # misc metrics["num_examples_train"] = len(train_loader.dataset) if opt.mode != "latent_transition_only" : evaluate_disentanglement(model, metrics, postfix="_final") # Evaluate linear_score and MCC on best models after thresholding best_files = [f.name for f in os.scandir(opt.output_dir) if f.name.startswith("best")] if len(best_files) > 0: print(f"Found {len(best_files)} best checkpoints, evaluating the last one.") model.load_state_dict(torch.load(os.path.join(opt.output_dir, best_files[-1]))["model"]) 
model.eval() else: print(f"Found 0 thresh_best checkpoints, reporting final metric") assignments, z, z_hat = evaluate_disentanglement(model, metrics, postfix="_best") perm_mat = np.zeros((opt.z_max_dim, opt.z_max_dim)) perm_mat[assignments] = 1.0 # save both ground_truth and learned latents np.save(os.path.join(opt.output_dir, "z_hat_best.npy"), z_hat) np.save(os.path.join(opt.output_dir, "z_gt_best.npy"), z) else: perm_mat = np.eye(opt.z_max_dim) if hasattr(train_dataset, "gt_g") and hasattr(model.latent_model, "g"): learned_g = (model.latent_model.g.get_proba() > 0.5).cpu().numpy().astype(np.float32) permuted_learned_g = np.matmul(np.matmul(perm_mat, learned_g), perm_mat.transpose()) #metrics["g_fn"], metrics["g_fp"] = edge_errors(permuted_learned_g, train_dataset.gt_g.cpu().numpy()) # Some runs used the above where target and prediction are flipped, resulting in flipped fn and fp metrics["g_fn"], metrics["g_fp"] = edge_errors(train_dataset.gt_g.cpu().numpy(), permuted_learned_g) metrics["g_shd"] = metrics["g_fn"] + metrics["g_fp"] if hasattr(train_dataset, "gt_gc") and hasattr(model.latent_model, "gc"): learned_gc = (model.latent_model.gc.get_proba() > 0.5).cpu().numpy().astype(np.float32) permuted_learned_gc = np.matmul(perm_mat, learned_gc) #metrics["gc_fn"], metrics["gc_fp"] = edge_errors(permuted_learned_gc, train_dataset.gt_gc.cpu().numpy()) # Some runs used the above where target and prediction are flipped, resulting in flipped fn and fp metrics["gc_fn"], metrics["gc_fp"] = edge_errors(train_dataset.gt_gc.cpu().numpy(), permuted_learned_gc) metrics["gc_shd"] = metrics["gc_fn"] + metrics["gc_fp"] logger.log_metrics(step=engine.state.iteration, metrics=metrics) # start training trainer.run(train_loader, opt.epochs) def init_exp(args=None): parser = argparse.ArgumentParser() parser.add_argument("--mode", type=str, required=True, choices=["vae", "supervised_vae", "random_vae", "latent_transition_only"], help="Model to use") parser.add_argument("--full_seq", 
action="store_true", help="This flag can only be used with --mode vae and --n_lag == 1. \ It makes the model expect the examples in the minibatch to be full sequences.") # data parser.add_argument("--dataset", type=str, required=True, help="Type of the dataset to be used. 'toy-MANIFOLD/TRANSITION_MODEL'") parser.add_argument("--gt_z_dim", type=int, default=10, help="ground truth dimensionality of z (for TRANSITION_MODEL == 'temporal_sparsity_non_trivial')") parser.add_argument("--gt_x_dim", type=int, default=20, help="ground truth dimensionality of x (for MANIFOLD == 'nn')") parser.add_argument("--num_samples", type=int, default=int(1e6), help="num_samples for synthetic datasets") parser.add_argument("--no_norm", action="store_true", help="no normalization in toy datasets") parser.add_argument("--dataroot", type=str, default="./", help="path to dataset") parser.add_argument("--train_prop", type=float, default=None, help="proportion of all samples used in validation set") parser.add_argument("--valid_prop", type=float, default=0.10, help="proportion of all samples used in validation set") parser.add_argument("--test_prop", type=float, default=0.10, help="proportion of all samples used in test set") parser.add_argument("--include_invalid", action="store_true", help="proportion of all samples used in test set") parser.add_argument("--n_workers", type=int, default=4, help="number of data loading workers") parser.add_argument("--batch_size", type=int, default=1024, help="batch size used during training") parser.add_argument("--eval_batch_size", type=int, default=1024, help="batch size used during evaluation") parser.add_argument("--epochs", type=int, default=500, help="number of epochs to train for") parser.add_argument("--time_limit", type=float, default=None, help="After this amount of time, terminate training.") # identifiable latent causal model (ILCM) parser.add_argument("--network_arch", type=str, default="MLP", choices=["MLP"], help="Type of network used for the 
transition model.") parser.add_argument("--n_lag", type=int, default=1, help="p(x_t | x_t-1, ..., x_t-n_lag)") parser.add_argument("--z_dim", type=int, default=10, help="Dimension of the learned latent representation") parser.add_argument("--transition_n_layer", type=int, default=5, help="number of hidden layers in transition NNs (0 implies linear).") parser.add_argument("--transition_hid_dim", type=int, default=512, help="number of units in each hidden layers of the transition NNs.") parser.add_argument("--output_delta", action="store_true", help="In transition model, the net will output the delta between z_tm1 and z_t instead of z_t") parser.add_argument("--delta_factor", type=float, default=1., help="factor multiplying the delta outputted by the transition network. (useful only with --output_delta)") parser.add_argument("--g_reg_coeff", type=float, default=0.0, help="Regularization coefficient for graph connectivity between z^t and z^{<t}") parser.add_argument("--gc_reg_coeff", type=float, default=0.0, help="Regularization coeff for graph connectivity between z^t and c") parser.add_argument("--drawhard", action="store_true", help="Instead of using soft samples in gumbel sigmoid, use hard samples in forward.") parser.add_argument("--gumbel_temperature", type=float, default=1.0, help="Controls the temperature in the gumbel-sigmoid masks.") parser.add_argument("--freeze_m", action="store_true", help="Do not learn m") parser.add_argument("--freeze_g", action="store_true", help="Do not learn g") parser.add_argument("--freeze_gc", action="store_true", help="Do not learn gc") parser.add_argument("--unfreeze_dummies", action="store_true", help="Learn the dummy parameters in masking") parser.add_argument("--var_p_mode", type=str, default="independent", choices=["dependent", "independent", "fixed"], help="dependent: dependency on z^t-1, independent: no dep on z^t-1, fixed: not learned at all") parser.add_argument("--learn_decoder_var", action="store_true", help="learn a 
variance of p(x|z)") parser.add_argument("--init_decoder_var", type=float, default=None, help="The initial variance of p(x|z).") parser.add_argument("--bn_enc_dec", action="store_true", help="Whether to use batch norm or not in encoder/decoder.") parser.add_argument("--bn_transition_net", action="store_true", help="Whether to use batch norm or not is transition net.") # vae parser.add_argument("--encoder", type=str, default='tabular', choices=['tabular'], help="VAE encoder architecture") parser.add_argument("--decoder", type=str, default='tabular', choices=['tabular'], help="VAE decoder architecture") parser.add_argument("--encoder_depth_multiplier", type=int, default=2, help="The amount of channels per layer is multiplied by this value") parser.add_argument("--decoder_depth_multiplier", type=int, default=2, help="The amount of channels per layer is multiplied by this value") parser.add_argument("--beta", type=float, default=1) # optimization parser.add_argument("--lr", type=float, default=5e-4, help="Learning rate") parser.add_argument("--max_grad_clip", type=float, default=0, help="Max gradient value (clip above - for off)") parser.add_argument("--max_grad_norm", type=float, default=0, help="Max norm of gradient (clip above - 0 for off)") parser.add_argument("--amsgrad", action="store_true", help="Use AMSgrad instead of Adam.") # logging parser.add_argument("--output_dir", required=True, help="Directory to output logs and model checkpoints") parser.add_argument("--fresh", action="store_true", help="Remove output directory before starting, even if experiment is completed.") parser.add_argument("--ckpt_period", type=int, default=50000, help="Number of batch iterations between each checkpoint.") parser.add_argument("--eval_period", type=int, default=5000, help="Number of batch iterations between each evaluation on the validation set.") parser.add_argument("--fast_log_period", type=int, default=100, help="Number of batch iterations between each cheap log.") 
parser.add_argument("--plot_period", type=int, default=10000, help="Number of batch iterations between each cheap log.") parser.add_argument("--scheduler", type=str, default="reduce_on_plateau", choices=["reduce_on_plateau"], help="Patience for reducing the learning rate in terms of evaluations on tye validation set") parser.add_argument("--scheduler_patience", type=int, default=120, help="(applies only to reduce_on_plateau) Patience for reducing the learning rate in terms of evaluations on tye validation set") parser.add_argument("--best_criterion", type=str, default="loss", choices=["loss", "nll"], help="Criterion to look at for saving best model and early stopping. loss include regularization terms") parser.add_argument('--no_print', action="store_true", help='do not print') parser.add_argument('--comet_key', type=str, default=None, help="comet api-key") parser.add_argument('--comet_tag', type=str, default=None, help="comet tag, to ease comparison") parser.add_argument('--comet_workspace', type=str, default=None, help="comet workspace") parser.add_argument('--comet_project_name', type=str, default=None, help="comet project_name") # misc parser.add_argument("--no_cuda", action="store_false", dest="cuda", help="Disables cuda") parser.add_argument("--double", action="store_true", help="Use Double precision") parser.add_argument("--seed", type=int, default=0, help="manual seed") if args is not None: opt = parser.parse_args(args) else: opt = parser.parse_args() # option preparation opt.num_samples = int(opt.num_samples) # cast float to integer opt.no_drawhard = not opt.drawhard opt.freeze_dummies = not opt.unfreeze_dummies opt.z_max_dim = opt.z_dim opt.freeze_m = True # Hack to get a plot of the random representation and avoid training altogether if "random" in opt.mode: opt.plot_period = 1 opt.time_limit = 0.001 # create experiment path if not os.path.exists(opt.output_dir): os.makedirs(opt.output_dir) # verify if terminate_ file exists, meaning the experiment is 
already completed. In that case, end script. if not opt.fresh: with os.scandir(opt.output_dir) as it: for entry in it: if entry.name.startswith("terminate_checkpoint") and entry.name.endswith(".pt"): print("This experiment is already completed and --fresh is False. Ending program.") sys.exit() # stop program # wiping out experiment folder for filename in
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) Honda Research Institute Europe GmbH
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# # import functools import logging import os import shlex import socket import threading import sip sip.setapi( 'QString', 2 ) sip.setapi( 'QVariant', 2 ) from PyQt5.QtCore import pyqtSignal, QObject, QByteArray from PyQt5.QtWidgets import * from ToolBOSCore.BuildSystem import BuildSystemTools from ToolBOSCore.ZenBuildMode import BuildOptionsWidget, ConsoleWidget,\ DependencyChecker, ExternalToolsWidget,\ InstallDialog, MenuBar, MetaInfoWidget,\ QtPackageModel, TaskButtonsWidget from ToolBOSCore.GenericGUI import IconProvider, TerminalWidget from ToolBOSCore.Packages import ProjectProperties from ToolBOSCore.Platforms import CrossCompilation, Platforms from ToolBOSCore.Settings import ToolBOSConf from ToolBOSCore.Storage import VersionControl from ToolBOSCore.Tools import SSH from ToolBOSCore.Util import Any, FastScript class MainWindow( QObject, object ): """ GUI controller class """ _seqTasksRun = pyqtSignal() _seqTasksFinished = pyqtSignal() def __init__( self, projectRoot=None ): QObject.__init__( self ) self.app = None self.console = None self.controlsLayout = None self.controlsWidget = None self.enabledCBs = [] self.externalTools = None self.globalInstDialog = None self.lock = threading.Lock() self.mainLayout = None self.mainWidget = None self.menuBar = None self.metaInfo = None self.model = QtPackageModel.BSTPackageModel() self.multiTermWidget = None self.optionsWidget = None self.platformCBs_nat = {} self.platformCBs_natLayout = None self.platformCBs_natWidget = None self.platformCBs_xcmp = {} self.platformCBs_xcmpLayout = None self.platformCBs_xcmpWidget = None self.projectRoot = projectRoot self.rightPaneLayout = None self.rightPaneWidget = None self.runningProcesses = 0 self.taskButtons = None self.terminals = {} self.visibleRemoteTerminals = set() self.window = None self._depCheckers = {} self._seqTasks = [] # task queue self._toolBOSConf = ToolBOSConf.getGlobalToolBOSConf() # unset VERBOSE environment variable as this would be inherited to # child 
processes, this is not intended -- instead we will rely on # the corresponding checkbox FastScript.unsetEnv( 'VERBOSE' ) def build( self ): options = '' if self.optionsWidget.getVerboseValue(): options += ' -v' jobs = self.optionsWidget.getParallelValue() if jobs > 1: options += ' -j %d' % jobs protoCmd = 'BST.py %s' % options self.console.addCommandToHistory_remote( protoCmd ) self._disableButtons() self._focusRemoteTerminals() for platform, checkbox in self.platformCBs_nat.items(): if checkbox.isChecked(): try: terminal = self.terminals[ 'nativ_' + platform ] hostname = terminal.hostname command = protoCmd logging.info( 'compiling natively on %s for %s', hostname, platform ) self._execProgram( terminal, command ) except KeyError: pass # don't build / ignore disabled platforms for platform, checkbox in self.platformCBs_xcmp.items(): if checkbox.isChecked(): try: platform = str( checkbox.text() ) terminal = self.terminals[ 'xcomp_' + platform ] hostname = terminal.hostname command = '%s -p %s' % ( protoCmd, platform ) logging.info( 'cross-compiling on %s for %s', hostname, platform ) logging.debug( 'terminal=%s', str(terminal) ) self._execProgram( terminal, command ) except KeyError: pass # don't build / ignore disabled platforms def clean( self ): if self.optionsWidget.getVerboseValue(): command = 'BST.py -dv' else: command = 'BST.py -d' self.console.addCommandToHistory_local( command ) self._focusLocalTerminal() self._execProgram( self.terminals[ 'localhost' ], command ) def quit( self ): logging.debug( 'closing connections...' ) for terminal in self.terminals.values(): terminal.kill() logging.debug( 'exiting application...' 
) self.app.closeAllWindows() def globalInstall( self ): force = self._toolBOSConf.getConfigOption( 'BST_svnCheck' ) == False if force or self.globalInstall_vcsCheck(): self.globalInstall_askReason() def globalInstall_vcsCheck( self ): try: vcs = VersionControl.auto() errors = vcs.consistencyCheck() except EnvironmentError: errors = [ 'No VCS revision information.', 'Unable to find ".svn" or ".git" directories.', 'Please make sure to install from a SVN working ' 'copy or Git repository.' ] if errors: title = errors[0] msg = 'Attention: ' + errors[1] + '\n\n' + errors[2] dialog = QMessageBox() dialog.critical( self.window, title, msg, QMessageBox.Cancel ) dialog.show() return False else: return True def globalInstall_askReason( self ): self.globalInstDialog = InstallDialog.GlobalInstallDialog() self.globalInstDialog.cancelled.connect( self._enableButtons ) self.globalInstDialog.ready.connect( self.globalInstall_exec ) self.globalInstDialog.show() def globalInstall_exec(self, changeType, reason): Any.requireIsTextNonEmpty( changeType ) Any.requireIsTextNonEmpty( reason ) # escape any doublequotes to not confuse the shlex used later on # (TBCORE-1323) reason = reason.replace( '"', '\\"' ) logging.debug( 'reason: %s', reason ) # Potentially this could be more generalized, f.i. into # PackageDetector / pkgInfo.py --> "Does pkg. need seq. install"? Any.requireIsTextNonEmpty( self.projectRoot ) installHook = os.path.join( self.projectRoot, 'installHook.sh' ) doSeqInstall = os.path.exists( installHook ) if doSeqInstall: msg = '%s: %s (<MAKEFILE_PLATFORM>)' % ( changeType, reason ) else: msg = '%s: %s' % ( changeType, reason ) if self.optionsWidget.getVerboseValue(): command = 'BST.py -ivy -M "%s"' % msg else: command = 'BST.py -iy -M "%s"' % msg # If the package has a installHook.sh (like in most "External" pkg.) 
# we need to launch the native installation on each individual # platform rather than on localhost only so that the installHook.sh # gets executed for each platform (see TBCORE-1094). if doSeqInstall: logging.info( 'performing sequential installation for each platform' ) self._execProgramSequential( command ) else: logging.info( 'performing combined multi-platform installation' ) self._focusLocalTerminal() self._execProgram( self.terminals[ 'localhost' ], command ) def main( self ): self.app = QApplication( [] ) self.app.setStyle( 'fusion' ) self.window = QMainWindow() self.menuBar = MenuBar.MenuBar( self.window ) self.mainLayout = QGridLayout() self.mainWidget = QWidget() self.multiTermWidget = TerminalWidget.MultiTermWidget() self.optionsWidget = BuildOptionsWidget.BuildOptionsWidget() self.controlsLayout = QHBoxLayout() self.controlsWidget = QWidget() self.metaInfo = MetaInfoWidget.MetaInfoWidget( self.model, self.controlsWidget ) self.console = ConsoleWidget.ConsoleWidget( self.controlsWidget ) self.taskButtons = TaskButtonsWidget.TaskButtonsWidget() self.platformCBs_natLayout = QVBoxLayout() self.platformCBs_natWidget = QGroupBox( 'build natively on' ) self.platformCBs_xcmpLayout = QVBoxLayout() self.platformCBs_xcmpWidget = QGroupBox( 'cross-compile for' ) self.rightPaneLayout = QVBoxLayout() self.rightPaneWidget = QWidget() self.runningProcesses = 0 self._seqTasksRun.connect( self._onSeqTasksRun ) self._seqTasksFinished.connect( self._onSeqTasksFinished ) # create an always existing terminal for localhost commands terminal = TerminalWidget.TerminalWidget( True, parent=self.multiTermWidget ) terminal.setToolTip( 'localhost' ) terminal.setWindowTitle('localhost') self.terminals[ 'localhost' ] = terminal self.multiTermWidget.addTerminal( terminal ) BST_localPaths = tuple( ToolBOSConf.getConfigOption( 'BST_localPaths' ) ) localHostname = socket.gethostname() sshPossible = SSH.guessRemoteAccessIsPossible() sshToolTip = 'Remote compilation not possible as SSH 
authorized keys are not configured' onLocaldiskToolTip = 'Remote compilation not possible as project is on local disc' projectOnLocaldisk = self.projectRoot.startswith( BST_localPaths ) remoteCompilation = sshPossible and not projectOnLocaldisk xcmpPlatforms = [] crossCompileHosts = self._toolBOSConf.getConfigOption( 'BST_crossCompileHosts' ) Any.requireIsDictNonEmpty( crossCompileHosts ) for platformName, compileHost in crossCompileHosts.items(): if compileHost: xcmpPlatforms.append( platformName ) xcmpPlatforms.sort() # platform selection in right pane, # create terminals for all other platforms (hide disabled ones) nativePlatforms = Platforms.getPlatformNames() defaultNative = CrossCompilation.getNativeCompilationList() defaultXcmp = CrossCompilation.getCrossCompilationList() for platform in nativePlatforms: checkbox = QCheckBox( platform ) checkbox.setChecked( platform in defaultNative ) checkbox.stateChanged.connect( self._onPlatformSelectionChange ) compileHost = CrossCompilation.getNativeCompileHost( platform ) natHost = 'Natively compile for "%s" on "%s"' % (platform, compileHost) checkbox.setToolTip( natHost ) self.platformCBs_nat[ platform ] = checkbox self.platformCBs_natLayout.addWidget( checkbox ) if remoteCompilation or compileHost == localHostname: checkbox.setEnabled( True ) else: checkbox.setEnabled( False ) checkbox.setChecked( False ) if projectOnLocaldisk: checkbox.setToolTip( onLocaldiskToolTip ) elif not sshPossible: checkbox.setToolTip( sshToolTip ) try: compileHost = CrossCompilation.getNativeCompileHost( platform ) if compileHost: logging.debug( 'native compile-host for platform=%s: %s', platform, compileHost ) fullPlatformString = Platforms.getFullPlatformString( platform ) infoText = 'Console output for %s (%s)' % ( platform, fullPlatformString ) terminal = TerminalWidget.TerminalWidget( False, parent=self.multiTermWidget ) terminal.setHostname( compileHost ) terminal.setToolTip( infoText ) terminal.setWindowTitle( infoText ) 
terminal.isNative = True terminal.hostChanged.connect( functools.partial( self._onHostChange, terminal ) ) terminal.closeRequest.connect( functools.partial( self._closeTerminal, terminal, checkbox ) ) self.terminals[ 'nativ_' + platform ] = terminal else: logging.debug( 'no native compile-host for platform=%s', platform ) checkbox.setEnabled( False ) checkbox.hide() # skip non-working platforms except KeyError: logging.error( "No
import numpy
from numba import jit, prange
from numpy.typing import ArrayLike
from scipy.ndimage import correlate as scipy_ndimage_correlate

# Shared Numba compilation settings: the 'numpy' error model skips e.g.
# zero-division checks, and these fastmath flags allow contraction and
# reassociation while preserving NaN/Inf semantics (no 'nnan'/'ninf').
__fastmath = {'contract', 'afn', 'reassoc'}
__error_model = 'numpy'


def numba_cpu_correlate(image: ArrayLike, kernel: ArrayLike, output=None):
    """Correlate an n-dimensional image with an odd-sized kernel on the CPU.

    Dispatches to parallel Numba kernels for 1D..6D inputs (when image and
    kernel have the same rank), clamping out-of-range indices to the nearest
    edge. Any other rank combination falls back to ``scipy.ndimage.correlate``.

    Args:
        image: Input array of any dimensionality. Cast to float32.
        kernel: Correlation kernel; every axis length must be odd so the
            kernel has a well-defined centre. Cast to float32.
        output: Optional pre-allocated output array; when omitted, a zeroed
            float32 array shaped like ``image`` is created.

    Returns:
        The correlated array (float32).

    Raises:
        ValueError: If any kernel dimension has even length.
    """
    # Kernel must have odd dimensions:
    if any((s % 2) == 0 for s in kernel.shape):
        raise ValueError(
            "This convolution function only supports kernels with odd lengths."
        )

    # Numba does not support float16 yet:
    dtype = numpy.float32
    image = image.astype(dtype=dtype, copy=False)
    kernel = kernel.astype(dtype=dtype, copy=False)

    # Ensure contiguity. NOTE(fix): the second check previously re-tested
    # `image`, so a non-contiguous kernel was never made contiguous.
    if not image.flags['C_CONTIGUOUS']:
        image = numpy.ascontiguousarray(image)
    if not kernel.flags['C_CONTIGUOUS']:
        kernel = numpy.ascontiguousarray(kernel)

    if output is None:
        output = numpy.zeros_like(image)

    # Switching on a per-dimension basis -- Numba needs a statically known
    # loop nest depth, hence one specialised kernel per rank:
    if image.ndim == 1 and kernel.ndim == 1:
        output = _numba_cpu_correlation_1d(image, kernel, output)
    elif image.ndim == 2 and kernel.ndim == 2:
        output = _numba_cpu_correlation_2d(image, kernel, output)
    elif image.ndim == 3 and kernel.ndim == 3:
        output = _numba_cpu_correlation_3d(image, kernel, output)
    elif image.ndim == 4 and kernel.ndim == 4:
        output = _numba_cpu_correlation_4d(image, kernel, output)
    elif image.ndim == 5 and kernel.ndim == 5:
        output = _numba_cpu_correlation_5d(image, kernel, output)
    elif image.ndim == 6 and kernel.ndim == 6:
        output = _numba_cpu_correlation_6d(image, kernel, output)
    else:
        # NOTE(fix): pass mode='nearest' so the fallback matches the
        # clamp-to-edge boundary handling of the Numba kernels above
        # (scipy's default mode is 'reflect').
        return scipy_ndimage_correlate(image, kernel, output, mode='nearest')

    return output


@jit(nopython=True, parallel=True, error_model=__error_model, fastmath=__fastmath)
def _numba_cpu_correlation_1d(
    image: ArrayLike, kernel: ArrayLike, output: ArrayLike = None
):
    """1D correlation with clamp-to-edge boundary handling."""
    (il0,) = image.shape
    (kl0,) = kernel.shape
    khl0 = kl0 // 2  # kernel half-length

    def image_get(u):
        # Clamp index to the valid range (nearest-edge padding):
        u = 0 if u < 0 else u
        u = il0 - 1 if u >= il0 else u
        return image[u]

    for oi0 in prange(il0):
        acc = 0
        for ki0 in range(-khl0, khl0 + 1):
            imgval = image_get(oi0 + ki0)
            kerval = kernel[khl0 + ki0]
            acc += imgval * kerval
        output[oi0] = acc
    return output


@jit(nopython=True, parallel=True, error_model=__error_model, fastmath=__fastmath)
def _numba_cpu_correlation_2d(
    image: ArrayLike, kernel: ArrayLike, output: ArrayLike = None
):
    """2D correlation with clamp-to-edge boundary handling."""
    il0, il1 = image.shape
    kl0, kl1 = kernel.shape
    khl0, khl1 = kl0 // 2, kl1 // 2

    def image_get(u, v):
        u = 0 if u < 0 else u
        u = il0 - 1 if u >= il0 else u
        v = 0 if v < 0 else v
        v = il1 - 1 if v >= il1 else v
        return image[u, v]

    # Only the outermost loop is parallelised:
    for oi0 in prange(il0):
        for oi1 in range(il1):
            acc = 0
            for ki0 in range(-khl0, khl0 + 1):
                for ki1 in range(-khl1, khl1 + 1):
                    imgval = image_get(oi0 + ki0, oi1 + ki1)
                    kerval = kernel[khl0 + ki0, khl1 + ki1]
                    acc += imgval * kerval
            output[oi0, oi1] = acc
    return output


@jit(nopython=True, parallel=True, error_model=__error_model, fastmath=__fastmath)
def _numba_cpu_correlation_3d(
    image: ArrayLike, kernel: ArrayLike, output: ArrayLike = None
):
    """3D correlation with clamp-to-edge boundary handling."""
    il0, il1, il2 = image.shape
    kl0, kl1, kl2 = kernel.shape
    khl0, khl1, khl2 = kl0 // 2, kl1 // 2, kl2 // 2

    def image_get(u, v, w):
        u = 0 if u < 0 else u
        u = il0 - 1 if u >= il0 else u
        v = 0 if v < 0 else v
        v = il1 - 1 if v >= il1 else v
        w = 0 if w < 0 else w
        w = il2 - 1 if w >= il2 else w
        return image[u, v, w]

    for oi0 in prange(il0):
        for oi1 in range(il1):
            for oi2 in range(il2):
                acc = 0
                for ki0 in range(-khl0, khl0 + 1):
                    for ki1 in range(-khl1, khl1 + 1):
                        for ki2 in range(-khl2, khl2 + 1):
                            imgval = image_get(oi0 + ki0, oi1 + ki1, oi2 + ki2)
                            kerval = kernel[khl0 + ki0, khl1 + ki1, khl2 + ki2]
                            acc += imgval * kerval
                output[oi0, oi1, oi2] = acc
    return output


@jit(nopython=True, parallel=True, error_model=__error_model, fastmath=__fastmath)
def _numba_cpu_correlation_4d(
    image: ArrayLike, kernel: ArrayLike, output: ArrayLike = None, parallelism: int = 8
):
    """4D correlation with clamp-to-edge boundary handling.

    `parallelism` is unused but kept for backward compatibility with callers
    that may pass it.
    """
    il0, il1, il2, il3 = image.shape
    kl0, kl1, kl2, kl3 = kernel.shape
    khl0, khl1, khl2, khl3 = kl0 // 2, kl1 // 2, kl2 // 2, kl3 // 2

    def image_get(u, v, w, x):
        u = 0 if u < 0 else u
        u = il0 - 1 if u >= il0 else u
        v = 0 if v < 0 else v
        v = il1 - 1 if v >= il1 else v
        w = 0 if w < 0 else w
        w = il2 - 1 if w >= il2 else w
        x = 0 if x < 0 else x
        x = il3 - 1 if x >= il3 else x
        return image[u, v, w, x]

    for oi0 in prange(il0):
        for oi1 in range(il1):
            for oi2 in range(il2):
                for oi3 in range(il3):
                    acc = 0
                    for ki0 in range(-khl0, khl0 + 1):
                        for ki1 in range(-khl1, khl1 + 1):
                            for ki2 in range(-khl2, khl2 + 1):
                                for ki3 in range(-khl3, khl3 + 1):
                                    imgval = image_get(
                                        oi0 + ki0, oi1 + ki1, oi2 + ki2, oi3 + ki3
                                    )
                                    kerval = kernel[
                                        khl0 + ki0, khl1 + ki1, khl2 + ki2, khl3 + ki3
                                    ]
                                    acc += imgval * kerval
                    output[oi0, oi1, oi2, oi3] = acc
    return output


@jit(nopython=True, parallel=True, error_model=__error_model, fastmath=__fastmath)
def _numba_cpu_correlation_5d(
    image: ArrayLike, kernel: ArrayLike, output: ArrayLike = None, parallelism: int = 8
):
    """5D correlation with clamp-to-edge boundary handling.

    `parallelism` is unused but kept for backward compatibility.
    """
    il0, il1, il2, il3, il4 = image.shape
    kl0, kl1, kl2, kl3, kl4 = kernel.shape
    khl0, khl1, khl2, khl3, khl4 = kl0 // 2, kl1 // 2, kl2 // 2, kl3 // 2, kl4 // 2

    def image_get(u, v, w, x, y):
        u = 0 if u < 0 else u
        u = il0 - 1 if u >= il0 else u
        v = 0 if v < 0 else v
        v = il1 - 1 if v >= il1 else v
        w = 0 if w < 0 else w
        w = il2 - 1 if w >= il2 else w
        x = 0 if x < 0 else x
        x = il3 - 1 if x >= il3 else x
        y = 0 if y < 0 else y
        y = il4 - 1 if y >= il4 else y
        return image[u, v, w, x, y]

    for oi0 in prange(il0):
        for oi1 in range(il1):
            for oi2 in range(il2):
                for oi3 in range(il3):
                    for oi4 in range(il4):
                        acc = 0
                        for ki0 in range(-khl0, khl0 + 1):
                            for ki1 in range(-khl1, khl1 + 1):
                                for ki2 in range(-khl2, khl2 + 1):
                                    for ki3 in range(-khl3, khl3 + 1):
                                        for ki4 in range(-khl4, khl4 + 1):
                                            imgval = image_get(
                                                oi0 + ki0,
                                                oi1 + ki1,
                                                oi2 + ki2,
                                                oi3 + ki3,
                                                oi4 + ki4,
                                            )
                                            kerval = kernel[
                                                khl0 + ki0,
                                                khl1 + ki1,
                                                khl2 + ki2,
                                                khl3 + ki3,
                                                khl4 + ki4,
                                            ]
                                            acc += imgval * kerval
                        output[oi0, oi1, oi2, oi3, oi4] = acc
    return output


@jit(nopython=True, parallel=True, error_model=__error_model, fastmath=__fastmath)
def _numba_cpu_correlation_6d(
    image: ArrayLike, kernel: ArrayLike, output: ArrayLike = None, parallelism: int = 8
):
    """6D correlation with clamp-to-edge boundary handling.

    `parallelism` is unused but kept for backward compatibility.
    NOTE(review): the tail of this function was truncated in the file as
    received; the inner loop nest below follows the exact pattern of the
    1D..5D variants -- confirm against the original source.
    """
    il0, il1, il2, il3, il4, il5 = image.shape
    kl0, kl1, kl2, kl3, kl4, kl5 = kernel.shape
    khl0, khl1, khl2, khl3, khl4, khl5 = (
        kl0 // 2,
        kl1 // 2,
        kl2 // 2,
        kl3 // 2,
        kl4 // 2,
        kl5 // 2,
    )

    def image_get(u, v, w, x, y, z):
        u = 0 if u < 0 else u
        u = il0 - 1 if u >= il0 else u
        v = 0 if v < 0 else v
        v = il1 - 1 if v >= il1 else v
        w = 0 if w < 0 else w
        w = il2 - 1 if w >= il2 else w
        x = 0 if x < 0 else x
        x = il3 - 1 if x >= il3 else x
        y = 0 if y < 0 else y
        y = il4 - 1 if y >= il4 else y
        z = 0 if z < 0 else z
        z = il5 - 1 if z >= il5 else z
        return image[u, v, w, x, y, z]

    for oi0 in prange(il0):
        for oi1 in range(il1):
            for oi2 in range(il2):
                for oi3 in range(il3):
                    for oi4 in range(il4):
                        for oi5 in range(il5):
                            acc = 0
                            for ki0 in range(-khl0, khl0 + 1):
                                for ki1 in range(-khl1, khl1 + 1):
                                    for ki2 in range(-khl2, khl2 + 1):
                                        for ki3 in range(-khl3, khl3 + 1):
                                            for ki4 in range(-khl4, khl4 + 1):
                                                for ki5 in range(-khl5, khl5 + 1):
                                                    imgval = image_get(
                                                        oi0 + ki0,
                                                        oi1 + ki1,
                                                        oi2 + ki2,
                                                        oi3 + ki3,
                                                        oi4 + ki4,
                                                        oi5 + ki5,
                                                    )
                                                    kerval = kernel[
                                                        khl0 + ki0,
                                                        khl1 + ki1,
                                                        khl2 + ki2,
                                                        khl3 + ki3,
                                                        khl4 + ki4,
                                                        khl5 + ki5,
                                                    ]
                                                    acc += imgval * kerval
                            output[oi0, oi1, oi2, oi3, oi4, oi5] = acc
    return output
<reponame>mailemccann/cmtb<filename>frontback/frontBackCSHORE.py import math from scipy.interpolate import griddata from prepdata import inputOutput, prepDataLib import os import datetime as DT import netCDF4 as nc import numpy as np from getdatatestbed.getDataFRF import getObs, getDataTestBed from testbedutils.geoprocess import FRFcoord from testbedutils.sblib import timeMatch, timeMatch_altimeter, makeNCdir from testbedutils.anglesLib import geo2STWangle, STWangle2geo, vectorRotation import plotting.operationalPlots as oP import makenc from matplotlib import pyplot as plt from subprocess import check_output def CSHORE_analysis(startTime, inputDict): """ Args: startTime (str): this is the time that all the CSHORE runs are tagged by (e.g., '2012-12-31T00:30:30Z') inputDicts (dict): dictionary input version_prefix - right now we have MOBILE, MOBILE_RESET. FIXED workingDir - path to the working directory the user wants pFlag - do you want plots or not? netCDFdir - directory where the netCDF files will be saved, like a boss Returns: None """ version_prefix = inputDict['version_prefix'] workingDir = inputDict['workingDirectory'] pFlag = inputDict['pFlag'] if 'netCDFdir' in inputDict.keys(): netCDFdir = inputDict['netCDFdir'] else: whoami = check_output('whoami')[:-1] netCDFdir = 'home/%s/thredds_data' % whoami if 'THREDDS' in inputDict: server = inputDict['THREDDS'] else: print('Chosing CHL thredds by Default, this may be slower!') server = 'CHL' model='CSHORE' #initialize the class cshore_io = inputOutput.cshoreIO() # get into the directory I need start_dir = workingDir path_prefix = os.path.join(model, version_prefix) # data super directiory d_s = DT.datetime.strptime(startTime, '%Y-%m-%dT%H:%M:%SZ') date_str = d_s.strftime('%Y-%m-%dT%H%M%SZ') # THE COLONS!!! startTime has COLONS!!! 
params, bc, veg, hydro, sed, morpho, meta = cshore_io.load_CSHORE_results(os.path.join(start_dir, path_prefix, date_str)) # params - metadata about the run # bc - boundary condition data, but for some reason it does not include the initial conditions? # veg - vegetation information # hydro - current and wave information # sed - sediment information # morpho - bed elevation information test = np.append(bc['time_offshore'], max(bc['time_offshore']) + (bc['time_offshore'][1] - bc['time_offshore'][0])) times = np.array([d_s + DT.timedelta(seconds=s) for s in test]) # change my coordinate system back to FRF!!!!!!! BC_FRFX = meta["BC_FRF_X"] BC_FRFY = meta["BC_FRF_Y"] x_n = BC_FRFX - morpho['x'][0] model_time = times[-1] # make the plots like a boss, with greatness if pFlag: # A - pull all the the observations that I need and store as dictionaries!!!!!!! # Altimeter data!!!!!!!! Alt05 = oP.alt_PlotData('Alt05', model_time, times) Alt04 = oP.alt_PlotData('Alt04', model_time, times) Alt03 = oP.alt_PlotData('Alt03', model_time, times) # go ahead and time match the altimeter data if Alt05['TS_toggle']: # ALT05 obs_zb = Alt05['zb'] obs_time = Alt05['time'] obs_loc = round(Alt05['xFRF']) mod_zb = morpho['zb'][:, np.where(abs(x_n - obs_loc) == min(abs(x_n - obs_loc)), 1, 0) == 1].squeeze() comp_time = times[1:] comp_time_n, obs_n, mod_n = timeMatch_altimeter(obs_time, obs_zb, comp_time, mod_zb) plot_ind = np.where(abs(comp_time_n - model_time) == min(abs(comp_time_n - model_time)), 1, 0) # delete and re-assign del Alt05['zb'] del Alt05['time'] del Alt05['plot_ind'] Alt05['zb'] = obs_n Alt05['time'] = comp_time_n Alt05['plot_ind'] = plot_ind if Alt04['TS_toggle']: # ALT04 obs_zb = Alt04['zb'] obs_time = Alt04['time'] obs_loc = round(Alt04['xFRF']) mod_zb = morpho['zb'][:, np.where(abs(x_n - obs_loc) == min(abs(x_n - obs_loc)), 1, 0) == 1].squeeze() comp_time = times[1:] comp_time_n, obs_n, mod_n = timeMatch_altimeter(obs_time, obs_zb, comp_time, mod_zb) plot_ind = 
np.where(abs(comp_time_n - model_time) == min(abs(comp_time_n - model_time)), 1, 0) # delete and re-assign del Alt04['zb'] del Alt04['time'] del Alt04['plot_ind'] Alt04['zb'] = obs_n Alt04['time'] = comp_time_n Alt04['plot_ind'] = plot_ind if Alt03['TS_toggle']: # ALT03 obs_zb = Alt03['zb'] obs_time = Alt03['time'] obs_loc = round(Alt03['xFRF']) mod_zb = morpho['zb'][:, np.where(abs(x_n - obs_loc) == min(abs(x_n - obs_loc)), 1, 0) == 1].squeeze() comp_time = times[1:] comp_time_n, obs_n, mod_n = timeMatch_altimeter(obs_time, obs_zb, comp_time, mod_zb) plot_ind = np.where(abs(comp_time_n - model_time) == min(abs(comp_time_n - model_time)), 1, 0) # delete and re-assign del Alt03['zb'] del Alt03['time'] del Alt03['plot_ind'] Alt03['zb'] = obs_n Alt03['time'] = comp_time_n Alt03['plot_ind'] = plot_ind # wave data & current data!!! Adopp_35 = oP.wave_PlotData('adop-3.5m', model_time, times) AWAC6m = oP.wave_PlotData('awac-6m', model_time, times) # this is just to check to see if I rounded down when i set my bathymetry, # in which case the 6m AWAC would not be inside the plot limits. if AWAC6m['xFRF'] > max(x_n): # if it is, round it down to nearest 1m - this will move it to the boundary if it IS the boundary gage AWAC6m['xFRF'] = float(int(AWAC6m['xFRF'])) else: pass AWAC8m = oP.wave_PlotData('awac-8m', model_time, times) # this is just to check to see if I rounded down when i set my bathymetry, # in which case the 8m AWAC would not be inside the plot limits. if AWAC8m['xFRF'] > max(x_n): # if it is, round it down to nearest 1m - this will move it to the boundary if it IS the boundary gage AWAC8m['xFRF'] = float(int(AWAC8m['xFRF'])) else: pass # go ahead and time match the wave and current dat! if Adopp_35['TS_toggle']: # Adopp_35 # get time-matched data!!!!!! 
waves obs_Hs = Adopp_35['Hs'] obs_time = Adopp_35['wave_time'] obs_loc = round(Adopp_35['xFRF']) mod_Hs = hydro['Hs'][:, np.where(abs(x_n - obs_loc) == min(abs(x_n - obs_loc)), 1, 0) == 1].squeeze() comp_time = times[1:] comp_time_n, obs_Hs_n, mod_Hs_n = timeMatch(obs_time, obs_Hs, comp_time, mod_Hs) plot_ind = np.where(abs(comp_time_n - model_time) == min(abs(comp_time_n - model_time)), 1, 0) # delete and re-assign del Adopp_35['Hs'] del Adopp_35['wave_time'] del Adopp_35['plot_ind'] Adopp_35['Hs'] = obs_Hs_n Adopp_35['wave_time'] = comp_time_n Adopp_35['plot_ind'] = plot_ind # get time-matched data!!!!!! currents # V obs_V = Adopp_35['V'] obs_time = Adopp_35['cur_time'] obs_loc = round(Adopp_35['xFRF']) mod_V = hydro['vmean'][:, np.where(abs(x_n - obs_loc) == min(abs(x_n - obs_loc)), 1, 0) == 1].squeeze() comp_time = times[1:] comp_time_n, obs_V_n, mod_V_n = timeMatch(obs_time, obs_V, comp_time, mod_V) plot_ind_V = np.where(abs(comp_time_n - model_time) == min(abs(comp_time_n - model_time)), 1, 0) # delete and re-assign del Adopp_35['V'] temp_cur_time = Adopp_35['cur_time'] del Adopp_35['cur_time'] del Adopp_35['plot_ind_V'] Adopp_35['V'] = obs_V_n Adopp_35['cur_time'] = comp_time_n Adopp_35['plot_ind_V'] = plot_ind_V # U obs_U = Adopp_35['U'] obs_time = temp_cur_time obs_loc = round(Adopp_35['xFRF']) mod_U = hydro['umean'][:, np.where(abs(x_n - obs_loc) == min(abs(x_n - obs_loc)), 1, 0) == 1].squeeze() comp_time = times[1:] comp_time_n, obs_U_n, mod_U_n = timeMatch(obs_time, obs_U, comp_time, mod_U) # delete and re-assign del Adopp_35['U'] Adopp_35['U'] = obs_U_n if AWAC6m['TS_toggle']: # AWAC6m # get time-matched data!!!!!! 
waves obs_Hs = AWAC6m['Hs'] obs_time = AWAC6m['wave_time'] obs_loc = round(AWAC6m['xFRF']) mod_Hs = hydro['Hs'][:, np.where(abs(x_n - obs_loc) == min(abs(x_n - obs_loc)), 1, 0) == 1].squeeze() comp_time = times[1:] comp_time_n, obs_Hs_n, mod_Hs_n = timeMatch(obs_time, obs_Hs, comp_time, mod_Hs) plot_ind = np.where(abs(comp_time_n - model_time) == min(abs(comp_time_n - model_time)), 1, 0) # delete and re-assign del AWAC6m['Hs'] del AWAC6m['wave_time'] del AWAC6m['plot_ind'] AWAC6m['Hs'] = obs_Hs_n AWAC6m['wave_time'] = comp_time_n AWAC6m['plot_ind'] = plot_ind # get time-matched data!!!!!! currents # V obs_V = AWAC6m['V'] obs_time = AWAC6m['cur_time'] obs_loc = round(AWAC6m['xFRF']) mod_V = hydro['vmean'][:, np.where(abs(x_n - obs_loc) == min(abs(x_n - obs_loc)), 1, 0) == 1].squeeze() comp_time = times[1:] comp_time_n, obs_V_n, mod_V_n = timeMatch(obs_time, obs_V, comp_time, mod_V) plot_ind_V = np.where(abs(comp_time_n - model_time) == min(abs(comp_time_n - model_time)), 1, 0) # delete and re-assign del AWAC6m['V'] temp_cur_time = AWAC6m['cur_time'] del AWAC6m['cur_time'] del AWAC6m['plot_ind_V'] AWAC6m['V'] = obs_V_n AWAC6m['cur_time'] = comp_time_n AWAC6m['plot_ind_V'] = plot_ind_V # U obs_U = AWAC6m['U'] obs_time = temp_cur_time obs_loc = round(AWAC6m['xFRF']) mod_U = hydro['umean'][:, np.where(abs(x_n - obs_loc) == min(abs(x_n - obs_loc)), 1, 0) == 1].squeeze() comp_time = times[1:] comp_time_n, obs_U_n, mod_U_n = timeMatch(obs_time, obs_U, comp_time, mod_U) # delete and re-assign del AWAC6m['U'] AWAC6m['U'] = obs_U_n if AWAC8m['TS_toggle']: # AWAC8m # get time-matched data!!!!!! 
waves obs_Hs = AWAC8m['Hs'] obs_time = AWAC8m['wave_time'] obs_loc = round(AWAC8m['xFRF']) mod_Hs = hydro['Hs'][:, np.where(abs(x_n - obs_loc) == min(abs(x_n - obs_loc)), 1, 0) == 1].squeeze() comp_time = times[1:] comp_time_n, obs_Hs_n, mod_Hs_n = timeMatch(obs_time, obs_Hs, comp_time, mod_Hs) plot_ind = np.where(abs(comp_time_n - model_time) == min(abs(comp_time_n - model_time)), 1, 0) # delete and re-assign del AWAC8m['Hs'] del AWAC8m['wave_time'] del AWAC8m['plot_ind'] AWAC8m['Hs'] = obs_Hs_n AWAC8m['wave_time'] = comp_time_n AWAC8m['plot_ind'] = plot_ind # get time-matched data!!!!!! currents # V obs_V = AWAC8m['V'] obs_time = AWAC8m['cur_time'] obs_loc = round(AWAC8m['xFRF']) mod_V = hydro['vmean'][:, np.where(abs(x_n - obs_loc) == min(abs(x_n - obs_loc)), 1, 0) == 1].squeeze() comp_time = times[1:] comp_time_n, obs_V_n, mod_V_n = timeMatch(obs_time, obs_V, comp_time, mod_V) plot_ind_V = np.where(abs(comp_time_n - model_time) == min(abs(comp_time_n - model_time)), 1, 0) # delete and re-assign del AWAC8m['V'] temp_cur_time = AWAC8m['cur_time'] del AWAC8m['cur_time'] del AWAC8m['plot_ind_V'] AWAC8m['V'] = obs_V_n AWAC8m['cur_time'] = comp_time_n AWAC8m['plot_ind_V'] = plot_ind_V # U obs_U = AWAC8m['U'] obs_time = temp_cur_time obs_loc = round(AWAC8m['xFRF']) mod_U = hydro['umean'][:, np.where(abs(x_n - obs_loc) == min(abs(x_n - obs_loc)), 1, 0) == 1].squeeze() comp_time = times[1:] comp_time_n, obs_U_n, mod_U_n = timeMatch(obs_time, obs_U, comp_time, mod_U) #
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import argparse
from torch.autograd import Variable
import torch.utils.data as data
#from data import v2, v1, AnnotationTransform, VOCDetection, detection_collate, VOCroot, VOC_CLASSES
from data import FISHdetection, detection_collate, v2, v1, BaseTransform
from utils.augmentations import SSDAugmentation
from layers.modules import MultiBoxLoss
from ssd_multiphase_custom_512_group import build_ssd
import numpy as np
import time
import h5py
from sklearn.model_selection import train_test_split, KFold
import copy
from test_ap import test_net


def str2bool(v):
    """Interpret a command-line string as a boolean flag.

    :param v: string such as "yes"/"no", "true"/"false", "t", "1"
    :return: True for the accepted truthy spellings, False otherwise
    """
    return v.lower() in ("yes", "true", "t", "1")


parser = argparse.ArgumentParser(description='Single Shot MultiBox Detector Training')
parser.add_argument('--version', default='v2', help='conv11_2(v2) or pool6(v1) as last layer')
parser.add_argument('--basenet', default='vgg16_reducedfc.pth', help='pretrained base model')
parser.add_argument('--jaccard_threshold', default=0.5, type=float, help='Min Jaccard index for matching')
parser.add_argument('--batch_size', default=16, type=int, help='Batch size for training')
parser.add_argument('--resume', default=None, type=str, help='Resume from checkpoint')
parser.add_argument('--num_workers', default=1, type=int, help='Number of workers used in dataloading')
# parser.add_argument('--iterations', default=120000, type=int, help='Number of training iterations')
parser.add_argument('--start_iter', default=0, type=int, help='Begin counting iterations starting from this value (should be used with resume)')
parser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda to train model')
parser.add_argument('--lr', '--learning-rate', default=5e-4, type=float, help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--weight_decay', default=5e-4, type=float,
                    help='Weight decay for SGD')
parser.add_argument('--gamma', default=0.1, type=float, help='Gamma update for SGD')
parser.add_argument('--log_iters', default=True, type=bool, help='Print the loss at each iteration')
parser.add_argument('--visdom', default=True, type=str2bool, help='Use visdom to for loss visualization')
parser.add_argument('--send_images_to_visdom', type=str2bool, default=False, help='Sample a random image from each 10th batch, send it to visdom after augmentations step')
parser.add_argument('--save_folder', default='weights/', help='Location to save checkpoint models')
# parser.add_argument('--voc_root', default=VOCroot, help='Location of VOC root directory')
args = parser.parse_args()

# Pick the default tensor type once so every tensor created below lands on
# the requested device.
if args.cuda and torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')

#cfg = (v1, v2)[args.version == 'v2']

if not os.path.exists(args.save_folder):
    os.mkdir(args.save_folder)

"""#################### Hyperparameters ####################"""
ssd_dim = 512
# current CT dataset has mean pixel val of 33.5
means = (34, 34, 34)
num_classes = 2  # lesion or background
batch_size = args.batch_size
#accum_batch_size = 32
#iter_size = accum_batch_size / batch_size
max_iter = 10001
weight_decay = 0.0005
stepvalues = (5000, 8000)  # iterations at which the LR is decayed by gamma
gamma = 0.1
momentum = 0.9
# use batchnorm for vgg & extras
batch_norm = True
# OHNM (online hard neg mining) ratio (pos:neg = 1:x)
ohnm_neg_ratio = 1
# data augmentation hyperparams
gt_pixel_jitter = 0.01
expand_ratio = 1.5
# CV hyperparams
cross_validation = 5
# ap hyperparam
confidence_threshold = 0.01
# string for output & weight name logging
output_string = 'ssd512_group_vanilla_BN_10CV'
"""#########################################################"""

if args.visdom:
    # imported lazily so the script runs without visdom when plotting is off
    import visdom
    viz = visdom.Visdom()

""""########## Data Loading & dimension matching ##########"""
# load custom CT dataset
datapath = '/home/vision/tkdrlf9202/Datasets/liver_lesion_aligned/lesion_dataset_4phase_aligned.h5'
# NOTE(review): ('liver_lesion') is just a parenthesised str, not a tuple,
# so this is equivalent to ['liver_lesion'] — confirm intent.
train_sets = [('liver_lesion')]


def load_lesion_dataset(data_path):
    """Load the preprocessed liver-lesion dataset from an HDF5 dump.

    :param data_path: path to the preprocessed .h5 dataset file containing
                      'ct' and 'coordinate' groups
    :return: tuple (ct, coordinate) of per-subject numpy arrays
    :raises FileNotFoundError: if data_path does not exist. (Previously this
        case fell through and implicitly returned None, which made the caller
        crash later on tuple unpacking; fail loudly here instead.)
    """
    if not os.path.isfile(data_path):
        raise FileNotFoundError('preprocessed lesion dataset not found: %s' % data_path)
    # load the preprocessed dataset dump
    print('loading lesion dataset...')
    with h5py.File(data_path, 'r') as dataset_h5:
        group_ct = dataset_h5['ct']
        group_coordinate = dataset_h5['coordinate']
        ct = [i[:] for i in group_ct.values()]
        coordinate = [i[:] for i in group_coordinate.values()]
        # the redundant explicit close() was removed: the context manager
        # already closes the file on exit
    return ct, coordinate


ct, coord = load_lesion_dataset(datapath)
# ct: [subjects, sample, phase, channel, 512, 512]
# coord: [subjects, sample, phase, channel, 5], [x_min, y_min, x_max, y_max, 0 (lesion class label)] format
# make channels last & 0~255 uint8 image
for idx in range(len(ct)):
    ct[idx] = np.transpose(ct[idx] * 255, [0, 1, 3, 4, 2]).astype(dtype=np.uint8)
    # use only coordinate from the middle slice, ditch the upper & lower ones
    coord[idx] = coord[idx][:, :, 1, :]

""" use CV instead
# split train & valid set, subject-level (without shuffle)
ct_train, ct_valid, coord_ssd_train, coord_ssd_valid = train_test_split(ct, coord, test_size=0.1, shuffle=False)
"""
# 5-fold CV, subject-level splits (the useless discarded get_n_splits()
# call was removed)
kf = KFold(n_splits=cross_validation)

# flatten the subject & sample dimension for each sets by stacking
ct_train = []
ct_valid = []
coord_ssd_train = []
coord_ssd_valid = []
for train_index, valid_index in kf.split(ct):
    ct_train_part = [ct[x] for x in train_index]
    ct_valid_part = [ct[x] for x in valid_index]
    coord_train_part = [coord[x] for x in train_index]
    coord_valid_part = [coord[x] for x in valid_index]
    ct_train.append(np.vstack(ct_train_part))
    ct_valid.append(np.vstack(ct_valid_part))
    coord_ssd_train.append(np.vstack(coord_train_part).astype(np.float64))
    coord_ssd_valid.append(np.vstack(coord_valid_part).astype(np.float64))

print('using 5-fold CV...')
for idx in range(cross_validation):
    print(ct_train[idx].shape, ct_valid[idx].shape)

"""
# for debug data with one slice per subject
ct_train = (np.array(ct).transpose([0, 1, 3, 4, 2]) * 255).astype(np.uint8)
coord_ssd_train = np.array(coord).astype(np.float64)
"""
"""#########################################################"""

"""#################### Network Definition ####################"""
ssd_net = build_ssd('train', ssd_dim, num_classes, batch_norm=batch_norm)
net = ssd_net

if args.cuda:
    net = torch.nn.DataParallel(ssd_net)
    cudnn.benchmark = True

if args.resume:
    print('Resuming training, loading {}...'.format(args.resume))
    ssd_net.load_weights(args.resume)
else:
    #vgg_weights = torch.load(args.save_folder + args.basenet)
    print('pretrained weights not loaded: training from scratch...')
    # print('Loading base network...')

if args.cuda:
    net = net.cuda()


def xavier(param):
    """Xavier-initialise a tensor in place."""
    init.xavier_uniform(param)


def weights_init(m):
    """Xavier-init conv weights and zero biases (used via nn.Module.apply)."""
    if isinstance(m, nn.Conv2d):
        xavier(m.weight.data)
        # guard: conv layers created with bias=False have m.bias is None
        if m.bias is not None:
            m.bias.data.zero_()


if not args.resume:
    print('Initializing weights...')
    # initialize newly added layers' weights with xavier method
    ssd_net.extras.apply(weights_init)
    ssd_net.loc.apply(weights_init)
    ssd_net.conf.apply(weights_init)

# for 5-fold CV, define 5 copies of the model and optimizer
print('copying models & optimizers for CV...')
net_cv = []
optimizer_cv = []
for idx in range(cross_validation):
    net_cv.append(copy.deepcopy(net))
    optimizer_cv.append(optim.SGD(net_cv[idx].parameters(), lr=args.lr,
                                  momentum=args.momentum, weight_decay=args.weight_decay))
criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, ohnm_neg_ratio, 0.5, False, args.cuda)
# the un-copied template model is no longer needed
del net
"""#########################################################"""

# create train & valid log text file
f_train = open('train_log_' + output_string + '.txt', 'w')
f_train.write('iteration\tloss\tloc_loss\tconf_loss\n') f_valid = open('valid_log_' + output_string + '.txt', 'w') f_valid.write('iteration\tloss\tloc_loss\tconf_loss\tAP\n') def train(): # loss counters loc_loss = 0 # epoch conf_loss = 0 epoch = 0 print('Loading Dataset...') dataset_train = [] dataset_valid = [] dataset_ap = [] # create 5-fold CV datasets for idx in range(cross_validation): dataset_train.append(FISHdetection(ct_train[idx], coord_ssd_train[idx], SSDAugmentation(gt_pixel_jitter, expand_ratio, ssd_dim, means), dataset_name='liver_lesion_train' + str(idx))) dataset_valid.append(FISHdetection(ct_valid[idx], coord_ssd_valid[idx], SSDAugmentation(gt_pixel_jitter, expand_ratio, ssd_dim, means), dataset_name='liver_detection_valid' + str(idx))) dataset_ap.append(FISHdetection(ct_valid[idx], coord_ssd_valid[idx], None, 'lesion_valid_ap')) # hard-define the epoch size to the minimum of set in CV: minimal impact epoch_size = [] for idx in range(cross_validation): epoch_size.append(len(dataset_train[idx]) // args.batch_size) epoch_size = min(epoch_size) print('Training SSD on 5-fold CV set...') step_index = 0 # visdom plot if args.visdom: # initialize visdom loss plot lot = viz.line( X=torch.zeros((1,)).cpu(), Y=torch.zeros((1, 3)).cpu(), opts=dict( xlabel='Iteration', ylabel='Loss', title='Current SSD_Liver Training Loss', legend=['Loc Loss', 'Conf Loss', 'Loss'] ) ) valid_lot = viz.line( X=torch.zeros((1,)).cpu(), Y=torch.zeros((1, 3)).cpu(), opts=dict( xlabel='Iteration', ylabel='Loss', title='SSD_Liver Validation Loss', legend=['Loc Loss', 'Conf Loss', 'Loss'] ) ) ap_lot = viz.line( X=torch.zeros((1)).cpu(), Y=torch.zeros((1)).cpu(), opts=dict( xlabel='Iteration', ylabel='AP', title='SSD_Liver AP validation', legend=['AP'] ) ) batch_iterator = None data_loader_train = [] data_loader_valid = [] for idx in range(cross_validation): data_loader_train.append(data.DataLoader(dataset_train[idx], batch_size, num_workers=args.num_workers, shuffle=True, 
collate_fn=detection_collate, pin_memory=True)) data_loader_valid.append(data.DataLoader(dataset_valid[idx], batch_size, num_workers=args.num_workers, shuffle=True, collate_fn=detection_collate, pin_memory=True)) for iteration in range(args.start_iter, max_iter): for idx in range(cross_validation): net_cv[idx].train() if (not batch_iterator) or (iteration % epoch_size == 0): # create batch iterator batch_iterator = [] for idx in range(cross_validation): batch_iterator.append(iter(data_loader_train[idx])) if iteration in stepvalues: step_index += 1 for idx in range(cross_validation): adjust_learning_rate(optimizer_cv[idx], args.gamma, step_index) epoch += 1 """ if iteration == 2000: for idx in range(cross_validation): # Freeze the conf layers after some iters to prevent overfitting for conf_param in net_cv[idx].module.conf.parameters(): conf_param.requires_grad = False """ # load train data loss_cv = 0. loc_loss_cv = 0. conf_loss_cv = 0. for idx in range(cross_validation): images, targets = next(batch_iterator[idx]) if args.cuda: images = images.cuda().view(images.shape[0], -1, images.shape[3], images.shape[4]) images = Variable(images) targets = [Variable(anno.cuda(), volatile=True) for anno in targets] else: images = images.view(images.shape[0], -1, images.shape[3], images.shape[4]) images = Variable(images) targets = [Variable(anno, volatile=True) for anno in targets] """ DEBUG CODE: printout augmented images & targets""" if False: import matplotlib.pyplot as plt import matplotlib.patches as patches from PIL import Image print('Debug mode: printing augmented data...') images_print = images.data[:, :, :, :].cpu().numpy() images_print[images_print < 0] = 0 targets_print = np.array([target.data[0].cpu().numpy().squeeze()[:4] for target in targets]) targets_print *= images_print.shape[2] images_print = images_print.astype(np.uint8) # center format to min-max format min_x, min_y, max_x, max_y = targets_print[:, 0], targets_print[:, 1], targets_print[:, 2], 
targets_print[:, 3] width = (max_x - min_x).astype(np.int32) height = (max_y - min_y).astype(np.int32) min_x = min_x.astype(np.int32) min_y = min_y.astype(np.int32) for idx in range(images_print.shape[0]): for idx_img in range(images_print.shape[1]): # visualization: draw gt & predicted bounding box and save to image output_image = images_print[idx, idx_img] fig, ax = plt.subplots(1) ax.imshow(output_image, cmap='gray') # green gt box rect_gt = patches.Rectangle((min_x[idx], min_y[idx]), width[idx], height[idx], linewidth=1, edgecolor='g', facecolor='none') ax.add_patch(rect_gt) plt.savefig(os.path.join('debug', 'train_' + str(idx) + '_' + str(idx_img) + '.png')) plt.close() exit() # forward t0 = time.time() out = net_cv[idx](images) # backprop optimizer_cv[idx].zero_grad() loss_l, loss_c = criterion(out, targets) loss = loss_l + loss_c loss.backward() optimizer_cv[idx].step() t1 = time.time() loss_cv += loss.data[0] loc_loss_cv += loss_l.data[0] conf_loss_cv += loss_c.data[0] del out loss_cv, loc_loss_cv, conf_loss_cv = loss_cv / cross_validation, loc_loss_cv / cross_validation, conf_loss_cv / cross_validation # train log if iteration % 10 == 0: print('Timer: %.4f sec.' % (t1 - t0)) print('iter ' + repr(iteration) + ' || Loss: %.4f ||' % (loss_cv), end=' ') if args.visdom and args.send_images_to_visdom: random_batch_index = np.random.randint(images.size(0)) viz.image(images.data[random_batch_index].cpu().numpy()) # write text log f_train.write(str(iteration)+'\t'+str(loss_cv)+'\t'+str(loc_loss_cv)+'\t'+str(conf_loss_cv)+'\n') f_train.flush() # validation phase for each several train iter if iteration % 100 == 0 and iteration > 10: del images, targets for idx in range(cross_validation):
0.00435488, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228, 'Renaming Unit/Peak Dynamic': 3.58947, 'Renaming Unit/Runtime Dynamic': 0.281063, 'Renaming Unit/Subthreshold Leakage': 0.0552466, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461, 'Runtime Dynamic': 3.60783, 'Subthreshold Leakage': 6.16288, 'Subthreshold Leakage with power gating': 2.55328}, {'Area': 32.0201, 'Execution Unit/Area': 7.68434, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 0.0343111, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.229638, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.214378, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.120359, 'Execution Unit/Instruction Scheduler/Area': 1.66526, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.138057, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223, 
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.22268, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.112401, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.473138, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.12503, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 4.42852, 'Execution Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0405005, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00579071, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0535371, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0428259, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.0940376, 'Execution Unit/Register Files/Runtime Dynamic': 0.0486166, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.121367, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.312111, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543, 'Execution Unit/Runtime Dynamic': 1.46888, 'Execution Unit/Subthreshold Leakage': 1.79543, 'Execution Unit/Subthreshold Leakage with power gating': 0.688821, 'Gate Leakage': 0.368936, 'Instruction Fetch Unit/Area': 5.85939, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00104339, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch 
Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00104339, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000936888, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000378052, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction 
Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000615197, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00363886, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00900001, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0589979, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0411696, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.61874, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.128772, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.139831, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 4.96435, 'Instruction Fetch Unit/Runtime Dynamic': 0.322411, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932286, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0358592, 'L2/Runtime Dynamic': 0.0086515, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80901, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 2.66766, 'Load Store Unit/Data Cache/Runtime Dynamic': 0.699481, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0350888, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.0462814, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.0462813, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 2.88621, 'Load Store Unit/Runtime Dynamic': 0.974006, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.114122, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.228244, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store 
Unit/Subthreshold Leakage': 0.591321, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283293, 'Memory Management Unit/Area': 0.4339, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.0405023, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0409814, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00808595, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.162824, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0212859, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.388508, 'Memory Management Unit/Runtime Dynamic': 0.0622673, 'Memory Management Unit/Subthreshold Leakage': 0.0766103, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333, 'Peak Dynamic': 16.2929, 'Renaming Unit/Area': 0.303608, 'Renaming Unit/FP Front End RAT/Area': 0.131045, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.106538, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885, 'Renaming Unit/Free List/Area': 0.0340654, 'Renaming Unit/Free List/Gate Leakage': 2.5481e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0306032, 'Renaming Unit/Free List/Runtime Dynamic': 0.00752528, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064, 'Renaming Unit/Gate Leakage': 0.00708398, 'Renaming Unit/Int Front End 
RAT/Area': 0.0941223, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0681947, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
<reponame>caub/wpt from six.moves import BaseHTTPServer import errno import os import socket from six.moves.socketserver import ThreadingMixIn import ssl import sys import threading import time import traceback from six import binary_type, text_type import uuid from collections import OrderedDict from six.moves.queue import Queue from h2.config import H2Configuration from h2.connection import H2Connection from h2.events import RequestReceived, ConnectionTerminated, DataReceived, StreamReset, StreamEnded from six.moves.urllib.parse import urlsplit, urlunsplit from . import routes as default_routes from .config import ConfigBuilder from .logger import get_logger from .request import Server, Request, H2Request from .response import Response, H2Response from .router import Router from .utils import HTTPException from .constants import h2_headers """HTTP server designed for testing purposes. The server is designed to provide flexibility in the way that requests are handled, and to provide control both of exactly what bytes are put on the wire for the response, and in the timing of sending those bytes. The server is based on the stdlib HTTPServer, but with some notable differences in the way that requests are processed. Overall processing is handled by a WebTestRequestHandler, which is a subclass of BaseHTTPRequestHandler. This is responsible for parsing the incoming request. A RequestRewriter is then applied and may change the request data if it matches a supplied rule. Once the request data had been finalised, Request and Reponse objects are constructed. These are used by the other parts of the system to read information about the request and manipulate the response. Each request is handled by a particular handler function. The mapping between Request and the appropriate handler is determined by a Router. 
By default handlers are installed to interpret files under the document root with .py extensions as executable python files (see handlers.py for the api for such files), .asis files as bytestreams to be sent literally and all other files to be served statically. The handler functions are responsible for either populating the fields of the response object, which will then be written when the handler returns, or for directly writing to the output stream. """ class RequestRewriter(object): def __init__(self, rules): """Object for rewriting the request path. :param rules: Initial rules to add; a list of three item tuples (method, input_path, output_path), defined as for register() """ self.rules = {} for rule in reversed(rules): self.register(*rule) self.logger = get_logger() def register(self, methods, input_path, output_path): """Register a rewrite rule. :param methods: Set of methods this should match. "*" is a special value indicating that all methods should be matched. :param input_path: Path to match for the initial request. :param output_path: Path to replace the input path with in the request. """ if isinstance(methods, (binary_type, text_type)): methods = [methods] self.rules[input_path] = (methods, output_path) def rewrite(self, request_handler): """Rewrite the path in a BaseHTTPRequestHandler instance, if it matches a rule. :param request_handler: BaseHTTPRequestHandler for which to rewrite the request. 
""" split_url = urlsplit(request_handler.path) if split_url.path in self.rules: methods, destination = self.rules[split_url.path] if "*" in methods or request_handler.command in methods: self.logger.debug("Rewriting request path %s to %s" % (request_handler.path, destination)) new_url = list(split_url) new_url[2] = destination new_url = urlunsplit(new_url) request_handler.path = new_url class WebTestServer(ThreadingMixIn, BaseHTTPServer.HTTPServer): allow_reuse_address = True acceptable_errors = (errno.EPIPE, errno.ECONNABORTED) request_queue_size = 2000 # Ensure that we don't hang on shutdown waiting for requests daemon_threads = True def __init__(self, server_address, request_handler_cls, router, rewriter, bind_address, config=None, use_ssl=False, key_file=None, certificate=None, encrypt_after_connect=False, latency=None, http2=False, **kwargs): """Server for HTTP(s) Requests :param server_address: tuple of (server_name, port) :param request_handler_cls: BaseHTTPRequestHandler-like class to use for handling requests. :param router: Router instance to use for matching requests to handler functions :param rewriter: RequestRewriter-like instance to use for preprocessing requests before they are routed :param config: Dictionary holding environment configuration settings for handlers to read, or None to use the default values. :param use_ssl: Boolean indicating whether the server should use SSL :param key_file: Path to key file to use if SSL is enabled. :param certificate: Path to certificate to use if SSL is enabled. :param encrypt_after_connect: For each connection, don't start encryption until a CONNECT message has been received. This enables the server to act as a self-proxy. :param bind_address True to bind the server to both the IP address and port specified in the server_address parameter. False to bind the server only to the port in the server_address parameter, but not to the address. 
:param latency: Delay in ms to wait before seving each response, or callable that returns a delay in ms """ self.router = router self.rewriter = rewriter self.scheme = "http2" if http2 else "https" if use_ssl else "http" self.logger = get_logger() self.latency = latency if bind_address: hostname_port = server_address else: hostname_port = ("",server_address[1]) #super doesn't work here because BaseHTTPServer.HTTPServer is old-style BaseHTTPServer.HTTPServer.__init__(self, hostname_port, request_handler_cls, **kwargs) if config is not None: Server.config = config else: self.logger.debug("Using default configuration") with ConfigBuilder(browser_host=server_address[0], ports={"http": [self.server_address[1]]}) as config: assert config["ssl_config"] is None Server.config = config self.key_file = key_file self.certificate = certificate self.encrypt_after_connect = use_ssl and encrypt_after_connect if use_ssl and not encrypt_after_connect: if http2: ssl_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH) ssl_context.load_cert_chain(keyfile=self.key_file, certfile=self.certificate) ssl_context.set_alpn_protocols(['h2']) self.socket = ssl_context.wrap_socket(self.socket, server_side=True) else: self.socket = ssl.wrap_socket(self.socket, keyfile=self.key_file, certfile=self.certificate, server_side=True) def handle_error(self, request, client_address): error = sys.exc_info()[1] if ((isinstance(error, socket.error) and isinstance(error.args, tuple) and error.args[0] in self.acceptable_errors) or (isinstance(error, IOError) and error.errno in self.acceptable_errors)): pass # remote hang up before the result is sent else: self.logger.error(traceback.format_exc()) class BaseWebTestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): """RequestHandler for WebTestHttpd""" def __init__(self, *args, **kwargs): self.logger = get_logger() BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs) def finish_handling_h1(self, request_line_is_valid): 
        # Apply any registered path-rewrite rules before routing.
        self.server.rewriter.rewrite(self)
        request = Request(self)
        response = Response(self, request)

        # CONNECT is handled specially (self-proxy / TLS upgrade path).
        if request.method == "CONNECT":
            self.handle_connect(response)
            return

        if not request_line_is_valid:
            # Request line exceeded the allowed length; reply 414 (URI Too Long).
            response.set_error(414)
            response.write()
            return

        self.logger.debug("%s %s" % (request.method, request.request_path))
        handler = self.server.router.get_handler(request)
        self.finish_handling(request, response, handler)

    def finish_handling(self, request, response, handler):
        """Dispatch ``request`` to ``handler`` and ensure a response is written.

        Applies handler-specific ``doc_root``/``url_base`` overrides, optional
        artificial latency configured on the server, converts handler
        exceptions into HTTP error responses, and writes the response if the
        handler did not do so itself.
        """
        # If the handler we used for the request had a non-default base path
        # set update the doc_root of the request to reflect this
        if hasattr(handler, "base_path") and handler.base_path:
            request.doc_root = handler.base_path
        if hasattr(handler, "url_base") and handler.url_base != "/":
            request.url_base = handler.url_base

        # Optional artificial delay; latency may be a constant (ms) or a
        # callable returning one.
        if self.server.latency is not None:
            if callable(self.server.latency):
                latency = self.server.latency()
            else:
                latency = self.server.latency
            self.logger.warning("Latency enabled. Sleeping %i ms" % latency)
            time.sleep(latency / 1000.)

        if handler is None:
            self.logger.debug("No Handler found!")
            response.set_error(404)
        else:
            try:
                handler(request, response)
            except HTTPException as e:
                # Handler signalled a specific HTTP error status.
                response.set_error(e.code, e.message)
            except Exception as e:
                # Any other handler failure becomes a 500 with traceback.
                self.respond_with_error(response, e)
        self.logger.debug("%i %s %s (%s) %i" % (response.status[0],
                                                request.method,
                                                request.request_path,
                                                request.headers.get('Referer'),
                                                request.raw_input.length))

        # Write the response if the handler populated fields but did not
        # write to the output stream directly.
        if not response.writer.content_written:
            response.write()

        # If a python handler has been used, the old ones won't send a END_STR data frame, so this
        # allows for backwards compatibility by accounting for these handlers that don't close streams
        if isinstance(response, H2Response) and not response.writer.stream_ended:
            response.writer.end_stream()

        # If we want to remove this in the future, a solution is needed for
        # scripts that produce a non-string iterable of content, since these
        # can't set a Content-Length header.
A notable example of this kind of # problem is with the trickle pipe i.e. foo.js?pipe=trickle(d1) if response.close_connection: self.close_connection = True if not self.close_connection: # Ensure that the whole request has been read from the socket request.raw_input.read() def handle_connect(self, response): self.logger.debug("Got CONNECT") response.status = 200 response.write() if self.server.encrypt_after_connect: self.logger.debug("Enabling SSL for connection") self.request = ssl.wrap_socket(self.connection, keyfile=self.server.key_file, certfile=self.server.certificate, server_side=True) self.setup() return def respond_with_error(self, response, e): message = str(e) if message: err = [message] else: err = [] err.append(traceback.format_exc()) response.set_error(500, "\n".join(err)) class Http2WebTestRequestHandler(BaseWebTestRequestHandler): protocol_version = "HTTP/2.0" def handle_one_request(self): """ This is the main HTTP/2.0 Handler. When a browser opens a connection to the server on the HTTP/2.0 port, the server enters this which will initiate the h2 connection and keep running throughout the duration of the interaction, and will read/write directly from the socket. Because there can be multiple H2 connections active at the same time, a UUID is created for each so that it is easier to tell them apart in the logs. 
""" config = H2Configuration(client_side=False) self.conn = H2ConnectionGuard(H2Connection(config=config)) self.close_connection = False # Generate a UUID to make it easier to distinguish different H2 connection debug messages self.uid = str(uuid.uuid4())[:8] self.logger.debug('(%s) Initiating h2 Connection' % self.uid) with self.conn as connection: connection.initiate_connection() data = connection.data_to_send() window_size = connection.remote_settings.initial_window_size self.request.sendall(data) # Dict of { stream_id: (thread, queue) } stream_queues = {} try: while not self.close_connection: data = self.request.recv(window_size) if data == '': self.logger.debug('(%s) Socket Closed' % self.uid) self.close_connection = True continue with self.conn as connection: frames = connection.receive_data(data) window_size
1 elif chroma == 6: if 55 < ASTM_hue < 80: interpolation_method = 2 else: interpolation_method = 1 elif chroma == 8: if 67.5 < ASTM_hue < 77.5: interpolation_method = 2 else: interpolation_method = 1 elif chroma >= 10: if 72.5 < ASTM_hue < 77.5: interpolation_method = 2 else: interpolation_method = 1 else: interpolation_method = 1 elif value == 2: if chroma == 2: if 15 < ASTM_hue < 27.5 or 77.5 < ASTM_hue < 80: interpolation_method = 2 else: interpolation_method = 1 elif chroma == 4: if 12.5 < ASTM_hue < 30 or 62.5 < ASTM_hue < 80: interpolation_method = 2 else: interpolation_method = 1 elif chroma == 6: if 7.5 < ASTM_hue < 22.5 or 62.5 < ASTM_hue < 80: interpolation_method = 2 else: interpolation_method = 1 elif chroma == 8: if 7.5 < ASTM_hue < 15 or 60 < ASTM_hue < 80: interpolation_method = 2 else: interpolation_method = 1 elif chroma >= 10: if 65 < ASTM_hue < 77.5: interpolation_method = 2 else: interpolation_method = 1 else: interpolation_method = 1 elif value == 3: if chroma == 2: if 10 < ASTM_hue < 37.5 or 65 < ASTM_hue < 85: interpolation_method = 2 else: interpolation_method = 1 elif chroma == 4: if 5 < ASTM_hue < 37.5 or 55 < ASTM_hue < 72.5: interpolation_method = 2 else: interpolation_method = 1 elif chroma == 6 or chroma == 8 or chroma == 10: if 7.5 < ASTM_hue < 37.5 or 57.5 < ASTM_hue < 82.5: interpolation_method = 2 else: interpolation_method = 1 elif chroma >= 12: if 7.5 < ASTM_hue < 42.5 or 57.5 < ASTM_hue < 80: interpolation_method = 2 else: interpolation_method = 1 else: interpolation_method = 1 elif value == 4: if chroma == 2 or chroma == 4: if 7.5 < ASTM_hue < 42.5 or 57.5 < ASTM_hue < 85: interpolation_method = 2 else: interpolation_method = 1 elif chroma == 6 or chroma == 8: if 7.5 < ASTM_hue < 40 or 57.5 < ASTM_hue < 82.5: interpolation_method = 2 else: interpolation_method = 1 elif chroma >= 10: if 7.5 < ASTM_hue < 40 or 57.5 < ASTM_hue < 80: interpolation_method = 2 else: interpolation_method = 1 else: interpolation_method = 1 elif value 
== 5: if chroma == 2: if 5 < ASTM_hue < 37.5 or 55 < ASTM_hue < 85: interpolation_method = 2 else: interpolation_method = 1 elif chroma == 4 or chroma == 6 or chroma == 8: if 2.5 < ASTM_hue < 42.5 or 55 < ASTM_hue < 85: interpolation_method = 2 else: interpolation_method = 1 elif chroma >= 10: if 2.5 < ASTM_hue < 42.5 or 55 < ASTM_hue < 82.5: interpolation_method = 2 else: interpolation_method = 1 else: interpolation_method = 1 elif value == 6: if chroma == 2 or chroma == 4: if 5 < ASTM_hue < 37.5 or 55 < ASTM_hue < 87.5: interpolation_method = 2 else: interpolation_method = 1 elif chroma == 6: if 5 < ASTM_hue < 42.5 or 57.5 < ASTM_hue < 87.5: interpolation_method = 2 else: interpolation_method = 1 elif chroma == 8 or chroma == 10: if 5 < ASTM_hue < 42.5 or 60 < ASTM_hue < 85: interpolation_method = 2 else: interpolation_method = 1 elif chroma == 12 or chroma == 14: if 5 < ASTM_hue < 42.5 or 60 < ASTM_hue < 82.5: interpolation_method = 2 else: interpolation_method = 1 elif chroma >= 16: if 5 < ASTM_hue < 42.5 or 60 < ASTM_hue < 80: interpolation_method = 2 else: interpolation_method = 1 else: interpolation_method = 1 elif value == 7: if chroma == 2 or chroma == 4 or chroma == 6: if 5 < ASTM_hue < 42.5 or 60 < ASTM_hue < 85: interpolation_method = 2 else: interpolation_method = 1 elif chroma == 8: if 5 < ASTM_hue < 42.5 or 60 < ASTM_hue < 82.5: interpolation_method = 2 else: interpolation_method = 1 elif chroma == 10: if (30 < ASTM_hue < 42.5 or 5 < ASTM_hue < 25 or 60 < ASTM_hue < 82.5): interpolation_method = 2 else: interpolation_method = 1 elif chroma == 12: if (30 < ASTM_hue < 42.5 or 7.5 < ASTM_hue < 27.5 or 80 < ASTM_hue < 82.5): interpolation_method = 2 else: interpolation_method = 1 elif chroma >= 14: if (32.5 < ASTM_hue < 40 or 7.5 < ASTM_hue < 15 or 80 < ASTM_hue < 82.5): interpolation_method = 2 else: interpolation_method = 1 else: interpolation_method = 1 elif value == 8: if (chroma == 2 or chroma == 4 or chroma == 6 or chroma == 8 or chroma == 10 or 
chroma == 12): if 5 < ASTM_hue < 40 or 60 < ASTM_hue < 85: interpolation_method = 2 else: interpolation_method = 1 elif chroma >= 14: if (32.5 < ASTM_hue < 40 or 5 < ASTM_hue < 15 or 60 < ASTM_hue < 85): interpolation_method = 2 else: interpolation_method = 1 else: interpolation_method = 1 elif value == 9: if chroma == 2 or chroma == 4: if 5 < ASTM_hue < 40 or 55 < ASTM_hue < 80: interpolation_method = 2 else: interpolation_method = 1 elif (chroma == 6 or chroma == 8 or chroma == 10 or chroma == 12 or chroma == 14): if 5 < ASTM_hue < 42.5: interpolation_method = 2 else: interpolation_method = 1 elif chroma >= 16: if 35 < ASTM_hue < 42.5: interpolation_method = 2 else: interpolation_method = 1 else: interpolation_method = 1 return interpolation_methods.get(interpolation_method) def xy_from_renotation_ovoid(specification): """ Converts given *Munsell* *Colorlab* specification to *xy* chromaticity coordinates on *Munsell Renotation System* ovoid. The *xy* point will be on the ovoid about the achromatic point, corresponding to the *Munsell* *Colorlab* specification value and chroma. Parameters ---------- specification : numeric or tuple *Munsell* *Colorlab* specification. Returns ------- tuple *xy* chromaticity coordinates. Raises ------ ValueError If an invalid interpolation method is retrieved from internal computations. Notes ----- - Input *Munsell* *Colorlab* specification value must be an integer in domain [1, 9]. - Input *Munsell* *Colorlab* specification chroma must be an integer and a multiple of 2 in domain [2, 50]. References ---------- .. [16] **The Munsell and Kubelka-Munk Toolbox**: *MunsellAndKubelkaMunkToolboxApr2014*: *MunsellRenotationRoutines/FindHueOnRenotationOvoid.m* Examples -------- >>> xy_from_renotation_ovoid((2.5, 5.0, 12.0, 4)) # doctest: +ELLIPSIS (0.4333..., 0.5602...) 
>>> xy_from_renotation_ovoid(8) (0.31006, 0.31616) """ if is_grey_munsell_colour(specification): return MUNSELL_DEFAULT_ILLUMINANT_CHROMATICITY_COORDINATES else: hue, value, chroma, code = specification assert 1 <= value <= 9, ( '"{0}" specification value must be in domain [1, 9]!'.format( specification)) assert is_integer(value), ( '"{0}" specification value must be an integer!'.format( specification)) value = round(value) assert 2 <= chroma <= 50, ( '"{0}" specification chroma must be in domain [2, 50]!'.format( specification)) assert abs( 2 * (chroma / 2 - round(chroma / 2))) <= INTEGER_THRESHOLD, ( ('"{0}" specification chroma must be an integer and ' 'multiple of 2!').format(specification)) chroma = 2 * round(chroma / 2) # Checking if renotation data is available without interpolation using # given threshold. threshold = 0.001 if (abs(hue) < threshold or abs(hue - 2.5) < threshold or abs(hue - 5) < threshold or abs(hue - 7.5) < threshold or abs(hue - 10) < threshold): hue = 2.5 * round(hue / 2.5) x, y, Y = xyY_from_renotation((hue, value, chroma, code)) return x, y hue_cw, hue_ccw = bounding_hues_from_renotation(hue, code) hue_minus, code_minus = hue_cw hue_plus, code_plus = hue_ccw x_grey, y_grey = MUNSELL_DEFAULT_ILLUMINANT_CHROMATICITY_COORDINATES specification_minus = (hue_minus, value, chroma, code_minus) x_minus, y_minus, Y_minus = xyY_from_renotation( specification_minus) z_minus, theta_minus, rho_minus = cartesian_to_cylindrical( (x_minus - x_grey, y_minus - y_grey, Y_minus)) theta_minus = math.degrees(theta_minus) specification_plus = (hue_plus, value, chroma, code_plus) x_plus, y_plus, Y_plus = xyY_from_renotation(specification_plus) z_plus, theta_plus, rho_plus = cartesian_to_cylindrical( (x_plus - x_grey, y_plus - y_grey, Y_plus)) theta_plus = math.degrees(theta_plus) lower_hue_angle = hue_to_hue_angle(hue_minus, code_minus) hue_angle = hue_to_hue_angle(hue, code) upper_hue_angle = hue_to_hue_angle(hue_plus, code_plus) if theta_minus - theta_plus > 
180: theta_plus += +360 if lower_hue_angle == 0:
# -*- coding: utf-8 -*- '''Text block objects based on PDF raw dict extracted with ``PyMuPDF``. Data structure based on this `link <https://pymupdf.readthedocs.io/en/latest/textpage.html>`_:: { # raw dict # -------------------------------- 'type': 0, 'bbox': (x0,y0,x1,y1), 'lines': [ lines ] # introduced dict # -------------------------------- 'before_space': bs, 'after_space': as, 'line_space': ls, 'alignment': 0, 'left_space': 10.0, 'right_space': 0.0, 'tab_stops': [15.4, 35.0] } ''' from docx.shared import Pt from docx.enum.text import WD_ALIGN_PARAGRAPH from .Lines import Lines from google_trans_new import google_translator from ..common.share import RectType, TextDirection, TextAlignment from ..common.Block import Block from ..common.share import rgb_component_from_name from ..common import constants from ..common import docx class TextBlock(Block): '''Text block.''' def __init__(self, raw:dict=None): if raw is None: raw = {} # remove key 'bbox' since it is calculated from contained lines if 'bbox' in raw: raw.pop('bbox') super().__init__(raw) # collect lines self.lines = Lines(parent=self).restore(raw.get('lines', [])) # set type self.set_text_block() # set Google Translator self.translator = google_translator() @property def text(self): '''Get text content in block, joning each line with ``\\n``.''' lines_text = [line.text for line in self.lines] # print(lines_text) # if lines_text != ['<image>']: # lines_text = self.translator.translate(lines_text, lang_tgt='ko') # lines_text = '\n'.join(lines_text) # print(lines_text) # return lines_text # print(lines_text) return '\n'.join(lines_text) @property def text_direction(self): '''All lines contained in text block must have same text direction. Otherwise, set normal direction. 
        '''
        res = set(line.text_direction for line in self.lines)
        # consider two text direction only: left-right, bottom-top
        if TextDirection.IGNORE in res:
            return TextDirection.IGNORE
        elif len(res)==1:
            return list(res)[0]
        else:
            # mixed directions -> fall back to normal left-right
            return TextDirection.LEFT_RIGHT

    @property
    def average_row_gap(self):
        '''Average distance between adjacent two physical rows.'''
        # idx selects the axis perpendicular to reading order:
        # y-axis (1) for horizontal text, x-axis (0) otherwise
        idx = 1 if self.is_horizontal_text else 0
        rows = self.lines.group_by_physical_rows()
        num = len(rows)

        # no gap if single row
        if num==1: return None

        # multi-lines block: gap = (total height - sum of row heights) / (row count - 1)
        block_height = self.bbox[idx+2]-self.bbox[idx]
        f_max_row_height = lambda row: max(abs(line.bbox[idx+2]-line.bbox[idx]) for line in row)
        sum_row_height = sum(map(f_max_row_height, rows))
        return (block_height-sum_row_height) / (num-1)

    @property
    def row_count(self):
        '''Count of physical rows.'''
        return len(self.lines.group_by_physical_rows())

    def is_flow_layout(self, *args):
        '''Check if flow layout'''
        return self.lines.is_flow_layout(*args)

    def store(self):
        '''Serialize block to dict, extending base attributes with contained lines.'''
        res = super().store()
        res.update({
            'lines': self.lines.store()
        })
        return res

    def add(self, line_or_lines):
        '''Add line or lines to TextBlock.'''
        if isinstance(line_or_lines, (Lines, list, tuple)):
            for line in line_or_lines:
                self.lines.append(line)
        else:
            self.lines.append(line_or_lines)

    def strip(self):
        '''Strip each Line instance.'''
        self.lines.strip()

    def plot(self, page):
        '''Plot block/line/span area for debug purpose.

        Args:
            page (fitz.Page): pdf page.
        '''
        # block border in blue
        blue = rgb_component_from_name('blue')
        super().plot(page, stroke=blue, dashes='[3.0 3.0] 0')

        # lines and spans
        for line in self.lines:
            # line border in red
            red = rgb_component_from_name('red')
            line.plot(page, stroke=red)

            # span regions in random color
            # NOTE(review): empty name presumably yields a random color -- confirm
            # against rgb_component_from_name's implementation
            for span in line.spans:
                c = rgb_component_from_name('')
                span.plot(page, color=c)

    def parse_text_format(self, rects):
        '''Parse text format with style represented by rectangles.

        Args:
            rects (Shapes): Shapes representing potential styles applied on blocks.
''' flag = False # use each rectangle (a specific text format) to split line spans for rect in rects: # a same style rect applies on only one block # EXCEPTION: hyperlink shape is determined in advance if rect.type!=RectType.HYPERLINK and rect.is_determined: continue # any intersection with current block? if not self.bbox.intersects(rect.bbox): continue # yes, then go further to lines in block if self.lines.parse_text_format(rect): flag = True return flag def parse_horizontal_spacing(self, bbox, line_separate_threshold:float, line_break_width_ratio:float, line_break_free_space_ratio:float, lines_left_aligned_threshold:float, lines_right_aligned_threshold:float, lines_center_aligned_threshold:float): ''' Set horizontal spacing based on lines layout and page bbox. * The general spacing is determined by paragraph alignment and indentation. * The detailed spacing of block lines is determined by tab stops. Multiple alignment modes may exist in block (due to improper organized lines from ``PyMuPDF``), e.g. some lines align left, and others right. In this case, **LEFT** alignment is set, and use ``TAB`` to position each line. ''' # NOTE: in PyMuPDF CS, horizontal text direction is same with positive x-axis, # while vertical text is on the contrary, so use f = -1 here idx0, idx1, f = (0, 2, 1.0) if self.is_horizontal_text else (3, 1, -1.0) # decide text alignment by internal lines in first priority; if can't decide, check # with page layout. 
int_alignment = self._internal_alignment((idx0, idx1, f), line_separate_threshold, lines_left_aligned_threshold, lines_right_aligned_threshold, lines_center_aligned_threshold) ext_alignment = self._external_alignment(bbox, (idx0, idx1, f), lines_center_aligned_threshold) self.alignment = int_alignment if int_alignment!=TextAlignment.UNKNOWN else ext_alignment # if still can't decide, set LEFT by default and ensure position by TAB stops if self.alignment == TextAlignment.NONE: self.alignment = TextAlignment.LEFT # NOTE: relative stop position to left boundary of block is calculated, # so block.left_space is required fun = lambda line: round((line.bbox[idx0]-self.bbox[idx0])*f, 1) # relative position to block all_pos = set(map(fun, self.lines)) self.tab_stops = list(filter(lambda pos: pos>=constants.MINOR_DIST, all_pos)) # adjust left/right indentation: # - set single side indentation if single line # - add minor space if multi-lines row_count = self.row_count if self.alignment == TextAlignment.LEFT: if row_count==1: self.right_space = 0 else: self.right_space -= constants.MAJOR_DIST elif self.alignment == TextAlignment.RIGHT: if row_count==1: self.left_space = 0 else: self.left_space -= constants.MAJOR_DIST elif self.alignment == TextAlignment.CENTER: if row_count==1: self.left_space = 0 self.right_space = 0 else: self.left_space -= constants.MAJOR_DIST self.right_space -= constants.MAJOR_DIST # parse line break self.lines.parse_line_break(bbox, line_break_width_ratio, line_break_free_space_ratio) def parse_line_spacing_relatively(self): '''Calculate relative line spacing, e.g. `spacing = 1.02`. It's complicated to calculate relative line spacing, e.g. considering font style. A simple rule is used: line_height = 1.3 * font_size .. note:: The line spacing could be updated automatically when changing the font size, while the layout might be broken in exact spacing mode, e.g. overlapping of lines. 
        '''
        # NOTE(review): docstring above says line_height = 1.3 * font_size but the
        # code uses 1.22 -- confirm which factor is intended.
        factor = 1.22

        # block height
        idx = 1 if self.is_horizontal_text else 0
        block_height = self.bbox[idx+2]-self.bbox[idx]

        # The layout of pdf text block: line-space-line-space-line, while
        # The layout of paragraph in docx: line-space-line-space-line-space, note the extra space at the end.
        # So, (1) calculate the line spacing x => x*1.3*sum_{n-1}(H_i) + Hn = H,
        # (2) calculate the extra space at the end, to be excluded from the before space of next block.
        rows = self.lines.group_by_physical_rows()
        count = len(rows)
        max_line_height = lambda row: max(abs(line.bbox[idx+2]-line.bbox[idx]) for line in row)
        last_line_height = max_line_height(rows[-1])

        if count > 1:
            # solve for x from: x*factor*sum_{n-1}(H_i) + Hn = H
            sum_pre_line_height = sum(max_line_height(row) for row in rows[:-1])
            self.line_space = (block_height-last_line_height)/sum_pre_line_height/factor
        else:
            # single row: no inter-row spacing to infer
            self.line_space = 1.0

        # extra space at the end
        end_space = (self.line_space*factor-1.0) * last_line_height if self.line_space>1.0 else 0.0
        return end_space

    def parse_line_spacing_exactly(self):
        '''Calculate exact line spacing, e.g. `spacing = Pt(12)`.

        The layout of pdf text block: line-space-line-space-line, excepting space
        before first line, i.e. space-line-space-line, when creating paragraph in
        docx. So, an average line height is ``space+line``.

        Then, the height of first line can be adjusted by updating paragraph
        before-spacing.

        .. note::
            Compared with the relative spacing mode, it has a more precise layout,
            but less flexible editing ability, especially changing the font size.
''' # check text direction idx = 1 if self.is_horizontal_text else 0 bbox = self.lines[0].bbox # first line first_line_height = bbox[idx+2] - bbox[idx] block_height = self.bbox[idx+2]-self.bbox[idx] # average line spacing count = self.row_count # count of rows if count > 1: line_space = (block_height-first_line_height)/(count-1) else: line_space = block_height self.line_space = line_space # since the line height setting in docx may affect the original bbox in pdf, # it's necessary to update the before spacing: # taking bottom left corner of first line as the reference point self.before_space += first_line_height - line_space # if before spacing is negative, set to zero and adjust calculated line spacing accordingly if self.before_space < 0: self.line_space += self.before_space / count self.before_space = 0.0 def make_docx(self, p): '''Create paragraph for a text block. Refer to ``python-docx`` doc for details on text format: * https://python-docx.readthedocs.io/en/latest/user/text.html * https://python-docx.readthedocs.io/en/latest/api/enum/WdAlignParagraph.html#wdparagraphalignment Args: p (Paragraph): ``python-docx`` paragraph instance. .. note:: The left position of paragraph is set by paragraph indent, rather than ``TAB`` stop. ''' pf = docx.reset_paragraph_format(p) # vertical spacing before_spacing = max(round(self.before_space, 1), 0.0) after_spacing = max(round(self.after_space, 1), 0.0) pf.space_before = Pt(before_spacing) pf.space_after = Pt(after_spacing) # line spacing pf.line_spacing = Pt(round(self.line_space, 1)) # horizontal
<filename>src/frr/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py<gh_stars>0 #!/usr/bin/python # # Copyright (c) 2020 by VMware, Inc. ("VMware") # Used Copyright (c) 2018 by Network Device Education Foundation, Inc. # ("NetDEF") in this file. # # Permission to use, copy, modify, and/or distribute this software # for any purpose with or without fee is hereby granted, provided # that the above copyright notice and this permission notice appear # in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE # OF THIS SOFTWARE. # """OSPF Basic Functionality Automation.""" import os import sys import time import pytest import ipaddress # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(CWD, "../")) sys.path.append(os.path.join(CWD, "../lib/")) # pylint: disable=C0413 # Import topogen and topotest helpers from lib.topogen import Topogen, get_topogen # Import topoJson from lib, to create topology and initial configuration from lib.common_config import ( start_topology, write_test_header, create_interfaces_cfg, write_test_footer, reset_config_on_routers, verify_rib, create_static_routes, step, shutdown_bringup_interface, topo_daemons, ) from lib.bgp import verify_bgp_convergence, create_router_bgp from lib.topolog import logger from lib.topojson import build_config_from_json from lib.ospf import ( verify_ospf_neighbor, clear_ospf, verify_ospf_rib, redistribute_ospf, config_ospf_interface, verify_ospf_interface, ) pytestmark = [pytest.mark.ospfd, pytest.mark.staticd] # Global variables topo = None # number of retries. nretry = 5 NETWORK = { "ipv4": [ "172.16.31.10/32", "192.168.3.11/32", "172.16.58.3/32", "172.16.31.10/32", "192.168.3.11/32", ] } TOPOOLOGY = """ Please view in a fixed-width font such as Courier. +---+ A1 +---+ +R1 +------------+R2 | +-+-+- +--++ | -- -- | | -- A0 -- | A0| ---- | | ---- | A2 | -- -- | | -- -- | +-+-+- +-+-+ +R0 +-------------+R3 | +---+ A3 +---+ """ TESTCASES = """ 1. Test OSPF intra area route calculations. 2. Test OSPF inter area route calculations. 3. Test OSPF redistribution of connected routes. """ def setup_module(mod): """ Sets up the pytest environment * `mod`: module name """ testsuite_run_time = time.asctime(time.localtime(time.time())) logger.info("Testsuite start time: {}".format(testsuite_run_time)) logger.info("=" * 40) logger.info("Running setup_module to create topology") # This function initiates the topology build with Topogen... json_file = "{}/ospf_rte_calc.json".format(CWD) tgen = Topogen(json_file, mod.__name__) global topo topo = tgen.json_topo # ... and here it calls Mininet initialization functions. 
# get list of daemons needs to be started for this suite. daemons = topo_daemons(tgen, topo) # Starting topology, create tmp files which are loaded to routers # to start deamons and then start routers start_topology(tgen, daemons) # Creating configuration from JSON build_config_from_json(tgen, topo) # Don't run this test if we have any failure. if tgen.routers_have_failure(): pytest.skip(tgen.errors) # Api call verify whether OSPF is converged ospf_covergence = verify_ospf_neighbor(tgen, topo) assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( ospf_covergence ) logger.info("Running setup_module() done") def teardown_module(mod): """ Teardown the pytest environment. * `mod`: module name """ logger.info("Running teardown_module to delete topology") tgen = get_topogen() # Stop toplogy and Remove tmp files tgen.stop_topology() logger.info( "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) ) logger.info("=" * 40) # ################################## # Test cases start here. # ################################## def test_ospf_redistribution_tc5_p0(request): """Test OSPF intra area route calculations.""" tc_name = request.node.name write_test_header(tc_name) tgen = get_topogen() # Don't run this test if we have any failure. 
if tgen.routers_have_failure(): pytest.skip(tgen.errors) global topo step("Bring up the base config.") reset_config_on_routers(tgen) step("Verify that OSPF neighbors are FULL.") ospf_covergence = verify_ospf_neighbor(tgen, topo) assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( ospf_covergence ) step("verify intra area route is calculated for r0-r3 interface ip in R1") ip = topo["routers"]["r0"]["links"]["r3"]["ipv4"] ip_net = str(ipaddress.ip_interface(u"{}".format(ip)).network) nh = topo["routers"]["r0"]["links"]["r1"]["ipv4"].split("/")[0] input_dict = { "r1": {"static_routes": [{"network": ip_net, "no_of_ip": 1, "routeType": "N"}]} } dut = "r1" result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) protocol = "ospf" result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Delete the ip address on newly configured interface of R0") topo1 = { "r0": { "links": { "r3": { "ipv4": topo["routers"]["r0"]["links"]["r3"]["ipv4"], "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], "delete": True, } } } } result = create_interfaces_cfg(tgen, topo1) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) dut = "r1" for num in range(0, nretry): result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh, expected=False) if result is not True: break assert result is not True, ( "Testcase {} : Failed \n " "r1: OSPF routes are present after deleting ip address of newly " "configured interface of R0 \n Error: {}".format(tc_name, result) ) protocol = "ospf" result = verify_rib( tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh, retry_timeout=10, expected=False, ) assert result is not True, ( "Testcase {} : Failed \n " "r1: OSPF routes are present in fib after deleting ip address of newly " 
"configured interface of R0 \n Error: {}".format(tc_name, result) ) step("Add back the deleted ip address on newly configured interface of R0") topo1 = { "r0": { "links": { "r3": { "ipv4": topo["routers"]["r0"]["links"]["r3"]["ipv4"], "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], } } } } result = create_interfaces_cfg(tgen, topo1) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) dut = "r1" result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) protocol = "ospf" result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Shut no shut interface on R0") dut = "r0" intf = topo["routers"]["r0"]["links"]["r3"]["interface"] shutdown_bringup_interface(tgen, dut, intf, False) step("un shut the OSPF interface on R0") dut = "r0" shutdown_bringup_interface(tgen, dut, intf, True) dut = "r1" result = verify_ospf_rib(tgen, dut, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) protocol = "ospf" result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) def test_ospf_redistribution_tc6_p0(request): """Test OSPF inter area route calculations.""" tc_name = request.node.name write_test_header(tc_name) tgen = get_topogen() # Don't run this test if we have any failure. 
if tgen.routers_have_failure(): pytest.skip(tgen.errors) global topo step("Bring up the base config.") reset_config_on_routers(tgen) step("Verify that OSPF neighbors are FULL.") ospf_covergence = verify_ospf_neighbor(tgen, topo) assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( ospf_covergence ) step("verify intra area route is calculated for r0-r3 interface ip in R1") ip = topo["routers"]["r0"]["links"]["r3"]["ipv4"] ip_net = str(ipaddress.ip_interface(u"{}".format(ip)).network) nh = topo["routers"]["r0"]["links"]["r1"]["ipv4"].split("/")[0] input_dict = { "r1": {"static_routes": [{"network": ip_net, "no_of_ip": 1, "routeType": "N"}]} } dut = "r1" result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) protocol = "ospf" result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Delete the ip address on newly configured loopback of R0") topo1 = { "r0": { "links": { "r3": { "ipv4": topo["routers"]["r0"]["links"]["r3"]["ipv4"], "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], "delete": True, } } } } result = create_interfaces_cfg(tgen, topo1) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) dut = "r1" for num in range(0, nretry): result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh, expected=False) if result is not True: break assert result is not True, ( "Testcase {} : Failed \n " "r1: OSPF routes are present after deleting ip address of newly " "configured loopback of R0 \n Error: {}".format(tc_name, result) ) protocol = "ospf" result = verify_rib( tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh, expected=False, ) assert result is not True, ( "Testcase {} : Failed \n " "r1: OSPF routes are present in fib after deleting ip address of newly " "configured loopback of R0 \n 
Error: {}".format(tc_name, result) ) step("Add back the deleted ip address on newly configured interface of R0") topo1 = { "r0": { "links": { "r3": { "ipv4": topo["routers"]["r0"]["links"]["r3"]["ipv4"], "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], } } } } result = create_interfaces_cfg(tgen, topo1) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) dut = "r1" result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) protocol = "ospf" result =
) else: self.update_record(obj_a=obj_surrogate.item(), obj_c=obj_critic.item(), m_kl_c=100 * kl_c.mean().item(), m_kl_d=100 * kl_d.mean().item(), # a_std=self.act.a_std_log.exp().mean().item(), entropy=(-obj_entropy.item()), a0_avg=buf_action[:, 0].mean().item(), a1_avg=buf_action[:, 1].mean().item(), a0_std=buf_action[:, 0].std().item(), a1_std=buf_action[:, 1].std().item(), ) return self.train_record class AgentHierarchicalPPO2(AgentPPO2): def __init__(self, args=None): super().__init__(args) # AgentPPO is an on policy DRL algorithm self.if_on_policy = True # could be 0.2 ~ 0.5, ratio.clamp(1 - clip, 1 + clip) if args is None: pass else: self.ratio_clip = args['ratio_clip'] if 'ratio_clip' in args.keys() else 0.3 # could be 0.01 ~ 0.05 self.lambda_entropy = args['lambda_entropy'] if 'lambda_entropy' in args.keys() else 0.05 # could be 0.95 ~ 0.99, GAE (Generalized Advantage Estimation. ICLR.2016.) self.lambda_gae_adv = args['lambda_gae_adv'] if 'lambda_gae_adv' in args.keys() else 0.97 # if use Generalized Advantage Estimation self.if_use_gae = args['if_use_gae'] if 'if_use_gae' in args.keys() else True self.if_use_dn = args['if_use_dn'] if 'if_use_dn' in args.keys() else False self.total_iterations = args['total_iterations'] if 'total_iterations' in args.keys() else 1000 self.loss_coeff_cri = args['loss_coeff_cri'] if 'loss_coeff_cri' in args.keys() else 0.5 self.objective_type = args['objective_type'] if 'objective_type' in args.keys() else 'clip' self.beta = args['beta'] if 'beta' in args.keys() else None self.policy_type = args['policy_type'] if 'policy_type' in args.keys() else None self.discrete_degree = args['discrete_degree'] if 'discrete_degree' in args.keys() else 3 self.train_model = args['train_model'] if 'train_model' in args.keys() else "mix" # mix discrete continues self.save_path = args['save_path'] if 'save_path' in args.keys() else None self.target_entropy = None self.cri_optimizer = None self.act_optimizer = None self.compute_reward = None # 
attribution def init(self, net_dim, state_dim, action_dim, reward_dim=1, if_per=False, device=None): self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if device is None else device self.compute_reward = self.compute_reward_gae if self.if_use_gae else self.compute_reward_adv # self.target_entropy = np.log(action_dim) # self.target_entropy = -action_dim self.cri_d = CriticAdv(state_dim, net_dim, self.if_use_dn).to(self.device) self.cri_c = CriticAdv(state_dim, net_dim, self.if_use_dn).to(self.device) self.act_d = ActorDiscretePPO(net_dim, state_dim, self.discrete_degree ** (action_dim - 1)).to(self.device) self.act_c = ActorPPO(net_dim, state_dim, action_dim - 1).to(self.device) if self.save_path is not None: self.load_model(self.save_path) self.act = [self.act_c, self.act_d] self.cri_c_optimizer = torch.optim.Adam(params=self.cri_d.parameters(), lr=self.learning_rate) self.cri_d_optimizer = torch.optim.Adam(params=self.cri_c.parameters(), lr=self.learning_rate) self.act_d_optimizer = torch.optim.Adam(params=self.act_d.parameters(), lr=self.learning_rate) self.act_c_optimizer = torch.optim.Adam(params=self.act_c.parameters(), lr=self.learning_rate) self.criterion = torch.nn.SmoothL1Loss() self.iter_index = 0 assert if_per is False # on-policy don't need PER @staticmethod def select_action(state, policy, explore_rate=1.) 
-> np.ndarray: states = torch.as_tensor((state,), dtype=torch.float32).detach_() if rd.rand() < explore_rate: # epsilon-greedy d_action = policy[1].get_action(states) c_action = policy[0].get_action(states) action = torch.cat((c_action, d_action.unsqueeze(dim=1)), 1) else: d_action = policy[1](states) c_action = policy[0](states) action = torch.cat((c_action, d_action.unsqueeze(dim=1)), 1) if action.isnan().any(): print(action) return action[0].detach().numpy() def update_net(self, buffer, _target_step, batch_size, repeat_times=4) -> (float, float): buffer.update_now_len_before_sample() buf_len = buffer.now_len # assert buf_len >= _target_step '''Trajectory using reverse reward''' with torch.no_grad(): buf_reward, buf_mask, buf_action, buf_state = buffer.sample_all() buf_action[:, :-1] = buf_action[:, :-1].clamp(-1 + 5e-8, 1 - 5e-8) bs = 2 ** 10 # set a smaller 'bs: batch size' when out of GPU memory. if self.train_model in ['discrete']: buf_value = torch.cat([self.cri_d(buf_state[i:i + bs]) for i in range(0, buf_state.size(0), bs)], dim=0) buf_r_ret, buf_adv = self.compute_reward(buf_len, buf_reward, buf_mask, buf_value) buf_d_action = buf_action[:, -1].unsqueeze(dim=1) buf_d_logprob = self.act_d.compute_logprob(buf_state, buf_d_action).unsqueeze(dim=1) elif self.train_model in ['continues']: buf_value = torch.cat([self.cri_c(buf_state[i:i + bs]) for i in range(0, buf_state.size(0), bs)], dim=0) buf_r_ret, buf_adv = self.compute_reward(buf_len, buf_reward, buf_mask, buf_value) buf_c_action = buf_action[:, :-1] buf_c_logprob = self.act_c.compute_logprob(buf_state, buf_c_action).unsqueeze(dim=1) tar_act_d = deepcopy(self.act_d) tar_cri_d = deepcopy(self.cri_d) tar_act_c = deepcopy(self.act_c) tar_cri_c = deepcopy(self.cri_c) del buf_reward, buf_mask '''PPO: Surrogate objective of Trust Region''' # mix discrete continues if self.train_model in ['mix', 'discrete']: obj_critic = None for _ in range(int(repeat_times * buf_len / batch_size)): indices = 
torch.randint(buf_len - 1, size=(batch_size,), requires_grad=False, device=self.device) state = buf_state[indices] d_action = buf_d_action[indices] logprob = buf_d_logprob[indices] r_ret = buf_r_ret[indices] adv = buf_adv[indices] value = self.cri_d(state) # critic network predicts the reward_sum (Q value) of state obj_critic = self.criterion(value, r_ret) self.cri_d_optimizer.zero_grad() obj_critic.backward() torch.nn.utils.clip_grad_norm_(self.cri_d.parameters(), 4.) self.cri_d_optimizer.step() new_logprob = self.act_d.compute_logprob(state, d_action).unsqueeze(dim=1) # it is obj_actor ratio = (new_logprob - logprob).clamp(-20, 2).exp() obj_surrogate1 = adv * ratio if self.objective_type in ['clip']: obj_surrogate2 = adv * ratio.clamp(1 - self.ratio_clip, 1 + self.ratio_clip) obj_surrogate = -torch.min(obj_surrogate1, obj_surrogate2).mean() elif self.objective_type == 'kl': mean_kl = torch.distributions.kl_divergence(tar_act_d.get_distribution(state), self.act_d.get_distribution(state)).mean() obj_surrogate = -obj_surrogate1.mean() + self.beta * mean_kl obj_entropy = (new_logprob.exp() * new_logprob).mean() obj_actor = obj_surrogate + obj_entropy * self.lambda_entropy self.act_d_optimizer.zero_grad() obj_actor.backward() torch.nn.utils.clip_grad_norm_(self.act_d.parameters(), 4.) 
self.act_d_optimizer.step() tar_dist = tar_act_d.get_distribution(state) dist = self.act_d.get_distribution(state) kl = torch.distributions.kl_divergence(tar_dist, dist).mean() self.train_record['obj_da'] = obj_surrogate.item() self.train_record['obj_dc'] = obj_critic.item() self.train_record['m_kl_d'] = 100 * kl.mean().item() self.train_record['da_entropy'] = -obj_entropy.item() if self.train_model in ['mix', 'continues']: obj_critic = None for _ in range(int(repeat_times * buf_len / batch_size)): indices = torch.randint(buf_len - 1, size=(batch_size,), requires_grad=False, device=self.device) state = buf_state[indices] c_action = buf_c_action[indices] logprob = buf_c_logprob[indices] r_ret = buf_r_ret[indices] adv = buf_adv[indices] value = self.cri_c(state) # critic network predicts the reward_sum (Q value) of state obj_critic = self.criterion(value, r_ret) self.cri_c_optimizer.zero_grad() obj_critic.backward() torch.nn.utils.clip_grad_norm_(self.cri_c.parameters(), 4.) self.cri_c_optimizer.step() new_logprob = self.act_c.compute_logprob(state, c_action).unsqueeze(dim=1) # it is obj_actor ratio = (new_logprob - logprob).clamp(-20, 2).exp() obj_surrogate1 = adv * ratio if self.objective_type in ['clip']: obj_surrogate2 = adv * ratio.clamp(1 - self.ratio_clip, 1 + self.ratio_clip) obj_surrogate = -torch.min(obj_surrogate1, obj_surrogate2).mean() elif self.objective_type == 'kl': mean_kl = torch.distributions.kl_divergence(tar_act_c.get_distribution(state), self.act_c.get_distribution(state)).mean() obj_surrogate = -obj_surrogate1.mean() + self.beta * mean_kl obj_entropy = (new_logprob.exp() * new_logprob).mean() obj_actor = obj_surrogate + obj_entropy * self.lambda_entropy self.act_d_optimizer.zero_grad() obj_actor.backward() torch.nn.utils.clip_grad_norm_(self.act_d.parameters(), 4.) 
self.act_d_optimizer.step() tar_dist = tar_act_c.get_distribution(state) dist = self.act_c.get_distribution(state) kl = torch.distributions.kl_divergence(tar_dist, dist).mean() self.train_record['obj_ca'] = obj_surrogate.item() self.train_record['obj_cc'] = obj_critic.item() self.train_record['m_kl_c'] = 100 * kl.mean().item() self.train_record['ca_entropy'] = -obj_entropy.item() self.train_record['a0_avg'] = buf_action[:, 0].item() self.train_record['a1_avg'] = buf_action[:, 1].item() self.train_record['a0_std'] = buf_action[:, 0].item() self.train_record['a1_std'] = buf_action[:, 1].item() return self.train_record def save_model(self, cwd): act_c_save_path = f'{cwd}/actor.pth' cri_c_save_path = f'{cwd}/critic.pth' act_d_save_path = f'{cwd}/actor_d.pth' cri_d_save_path = f'{cwd}/critic_d.pth' self.to_cpu() if self.act_c is not None: torch.save(self.act_c.state_dict(), act_c_save_path) if self.cri_c is not None: torch.save(self.cri_c.state_dict(), cri_c_save_path) if self.act_d is not None: torch.save(self.act_d.state_dict(), act_d_save_path) if self.cri_d is not None: torch.save(self.cri_d.state_dict(), cri_d_save_path) def load_model(self, cwd): act_c_save_path = f'{cwd}/actor.pth' cri_c_save_path = f'{cwd}/critic.pth' act_d_save_path = f'{cwd}/actor_d.pth' cri_d_save_path = f'{cwd}/critic_d.pth' def load_torch_file(network, save_path): network_dict = torch.load(save_path, map_location=lambda storage, loc: storage) network.load_state_dict(network_dict) if self.train_model in ['mix', 'continues']: if (self.act_d is not None) and os.path.exists(act_d_save_path): load_torch_file(self.act_d, act_d_save_path) print("Loaded act_d:", cwd) if (self.cri_d is not None) and os.path.exists(cri_d_save_path): load_torch_file(self.cri_d, cri_d_save_path) print("Loaded cri_d:", cwd) if self.train_model in ['mix', 'discrete']: if (self.act_c is not None) and os.path.exists(act_c_save_path): load_torch_file(self.act_c, act_c_save_path) print("Loaded act_c:", cwd) if (self.cri_c is 
not None) and os.path.exists(cri_c_save_path): load_torch_file(self.cri_c, cri_c_save_path) print("Loaded cri_c:", cwd) self.to_device() def to_cpu(self): device = torch.device('cpu') if next(self.act_d.parameters()).is_cuda: self.act_d.to(device) if next(self.cri_d.parameters()).is_cuda: self.cri_d.to(device) if next(self.act_c.parameters()).is_cuda: self.act_c.to(device) if next(self.cri_c.parameters()).is_cuda: self.cri_c.to(device) def to_device(self): if not next(self.act_d.parameters()).is_cuda: self.act_d.to(self.device) if not next(self.cri_d.parameters()).is_cuda: self.cri_d.to(self.device) if not next(self.act_c.parameters()).is_cuda: self.act_c.to(self.device) if not next(self.cri_c.parameters()).is_cuda: self.cri_c.to(self.device) class AgentRNNPPO2(AgentPPO2): def __init__(self, args=None): super().__init__(args) # AgentPPO is an on policy DRL algorithm self.if_on_policy = True # could be 0.2 ~ 0.5, ratio.clamp(1 - clip, 1 + clip) if args is None: pass else: self.ratio_clip = args['ratio_clip'] if 'ratio_clip' in args.keys() else 0.3 # could be 0.01 ~ 0.05 self.lambda_entropy = args['lambda_entropy'] if 'lambda_entropy' in args.keys() else 0.05 # could be 0.95 ~ 0.99, GAE (Generalized Advantage Estimation. ICLR.2016.) 
self.lambda_gae_adv = args['lambda_gae_adv'] if 'lambda_gae_adv' in args.keys() else 0.97 # if use Generalized Advantage Estimation self.if_use_gae = args['if_use_gae'] if 'if_use_gae' in args.keys() else True self.if_use_dn = args['if_use_dn'] if 'if_use_dn' in args.keys() else False self.total_iterations = args['total_iterations'] if 'total_iterations' in args.keys() else 1000 self.loss_coeff_cri = args['loss_coeff_cri'] if 'loss_coeff_cri' in args.keys() else 0.5 self.objective_type = args['objective_type'] if 'objective_type' in args.keys() else 'clip' self.beta = args['beta'] if 'beta' in args.keys() else None self.policy_type = args['policy_type'] if 'policy_type' in args.keys() else None self.if_store_state = args['if_store_state'] if 'if_store_state' in args.keys() else True self.hidden_state_dim = args['hidden_state_dim'] if 'hidden_state_dim' in args.keys() else 128 self.rnn_timestep = args['rnn_timestep'] if 'rnn_timestep' in args.keys() else 16 self.infer_by_sequence = args['infer_by_sequence'] if 'infer_by_sequence' in args.keys() else False self.target_entropy = None self.cri_optimizer = None self.act_optimizer = None self.compute_reward = None # attribution def init(self, net_dim, state_dim, action_dim, reward_dim=1, if_per=False, device=None): self.state_dim = state_dim self.action_dim = action_dim self.reward_dim = reward_dim self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if device is None else device self.compute_reward = self.compute_reward_gae if self.if_use_gae else self.compute_reward_adv # self.target_entropy = np.log(action_dim) # self.target_entropy = -action_dim if self.infer_by_sequence: self.cri = CarlaRNNPPOSequence(net_dim, state_dim, action_dim, hidden_state_dim=self.hidden_state_dim, if_store_state=self.if_store_state) else: if self.policy_type in ['mg']: self.cri = CarlaRNNPPOMG(net_dim, state_dim, action_dim, hidden_state_dim=self.hidden_state_dim, if_store_state=self.if_store_state) else: self.cri = 
CarlaRNNPPO(net_dim, state_dim, action_dim, hidden_state_dim=self.hidden_state_dim, if_store_state=self.if_store_state) self.act = self.cri self.optimizer = torch.optim.Adam(params=self.cri.parameters(), lr=self.learning_rate) self.criterion = torch.nn.SmoothL1Loss() self.iter_index
ip_geolocation) if minifies is not None: pulumi.set(__self__, "minifies", minifies) if mirage is not None: pulumi.set(__self__, "mirage", mirage) if opportunistic_encryption is not None: pulumi.set(__self__, "opportunistic_encryption", opportunistic_encryption) if origin_error_page_pass_thru is not None: pulumi.set(__self__, "origin_error_page_pass_thru", origin_error_page_pass_thru) if polish is not None: pulumi.set(__self__, "polish", polish) if resolve_override is not None: pulumi.set(__self__, "resolve_override", resolve_override) if respect_strong_etag is not None: pulumi.set(__self__, "respect_strong_etag", respect_strong_etag) if response_buffering is not None: pulumi.set(__self__, "response_buffering", response_buffering) if rocket_loader is not None: pulumi.set(__self__, "rocket_loader", rocket_loader) if security_level is not None: pulumi.set(__self__, "security_level", security_level) if server_side_exclude is not None: pulumi.set(__self__, "server_side_exclude", server_side_exclude) if sort_query_string_for_cache is not None: pulumi.set(__self__, "sort_query_string_for_cache", sort_query_string_for_cache) if ssl is not None: pulumi.set(__self__, "ssl", ssl) if true_client_ip_header is not None: pulumi.set(__self__, "true_client_ip_header", true_client_ip_header) if waf is not None: pulumi.set(__self__, "waf", waf) @property @pulumi.getter(name="alwaysOnline") def always_online(self) -> Optional[str]: """ Whether this action is `"on"` or `"off"`. """ return pulumi.get(self, "always_online") @property @pulumi.getter(name="alwaysUseHttps") def always_use_https(self) -> Optional[bool]: """ Boolean of whether this action is enabled. Default: false. """ return pulumi.get(self, "always_use_https") @property @pulumi.getter(name="automaticHttpsRewrites") def automatic_https_rewrites(self) -> Optional[str]: """ Whether this action is `"on"` or `"off"`. 
""" return pulumi.get(self, "automatic_https_rewrites") @property @pulumi.getter(name="browserCacheTtl") def browser_cache_ttl(self) -> Optional[str]: """ The Time To Live for the browser cache. `0` means 'Respect Existing Headers' """ return pulumi.get(self, "browser_cache_ttl") @property @pulumi.getter(name="browserCheck") def browser_check(self) -> Optional[str]: """ Whether this action is `"on"` or `"off"`. """ return pulumi.get(self, "browser_check") @property @pulumi.getter(name="bypassCacheOnCookie") def bypass_cache_on_cookie(self) -> Optional[str]: """ String value of cookie name to conditionally bypass cache the page. """ return pulumi.get(self, "bypass_cache_on_cookie") @property @pulumi.getter(name="cacheByDeviceType") def cache_by_device_type(self) -> Optional[str]: """ Whether this action is `"on"` or `"off"`. """ return pulumi.get(self, "cache_by_device_type") @property @pulumi.getter(name="cacheDeceptionArmor") def cache_deception_armor(self) -> Optional[str]: """ Whether this action is `"on"` or `"off"`. """ return pulumi.get(self, "cache_deception_armor") @property @pulumi.getter(name="cacheKeyFields") def cache_key_fields(self) -> Optional['outputs.PageRuleActionsCacheKeyFields']: """ Controls how Cloudflare creates Cache Keys used to identify files in cache. See below for full description. """ return pulumi.get(self, "cache_key_fields") @property @pulumi.getter(name="cacheLevel") def cache_level(self) -> Optional[str]: """ Whether to set the cache level to `"bypass"`, `"basic"`, `"simplified"`, `"aggressive"`, or `"cache_everything"`. """ return pulumi.get(self, "cache_level") @property @pulumi.getter(name="cacheOnCookie") def cache_on_cookie(self) -> Optional[str]: """ String value of cookie name to conditionally cache the page. 
""" return pulumi.get(self, "cache_on_cookie") @property @pulumi.getter(name="cacheTtlByStatuses") def cache_ttl_by_statuses(self) -> Optional[Sequence['outputs.PageRuleActionsCacheTtlByStatus']]: """ Set cache TTL based on the response status from the origin web server. Can be specified multiple times. See below for full description. """ return pulumi.get(self, "cache_ttl_by_statuses") @property @pulumi.getter(name="disableApps") def disable_apps(self) -> Optional[bool]: """ Boolean of whether this action is enabled. Default: false. """ return pulumi.get(self, "disable_apps") @property @pulumi.getter(name="disablePerformance") def disable_performance(self) -> Optional[bool]: """ Boolean of whether this action is enabled. Default: false. """ return pulumi.get(self, "disable_performance") @property @pulumi.getter(name="disableRailgun") def disable_railgun(self) -> Optional[bool]: """ Boolean of whether this action is enabled. Default: false. """ return pulumi.get(self, "disable_railgun") @property @pulumi.getter(name="disableSecurity") def disable_security(self) -> Optional[bool]: """ Boolean of whether this action is enabled. Default: false. """ return pulumi.get(self, "disable_security") @property @pulumi.getter(name="edgeCacheTtl") def edge_cache_ttl(self) -> Optional[int]: """ The Time To Live for the edge cache. """ return pulumi.get(self, "edge_cache_ttl") @property @pulumi.getter(name="emailObfuscation") def email_obfuscation(self) -> Optional[str]: """ Whether this action is `"on"` or `"off"`. """ return pulumi.get(self, "email_obfuscation") @property @pulumi.getter(name="explicitCacheControl") def explicit_cache_control(self) -> Optional[str]: """ Whether origin Cache-Control action is `"on"` or `"off"`. """ return pulumi.get(self, "explicit_cache_control") @property @pulumi.getter(name="forwardingUrl") def forwarding_url(self) -> Optional['outputs.PageRuleActionsForwardingUrl']: """ The URL to forward to, and with what status. See below. 
""" return pulumi.get(self, "forwarding_url") @property @pulumi.getter(name="hostHeaderOverride") def host_header_override(self) -> Optional[str]: """ Value of the Host header to send. """ return pulumi.get(self, "host_header_override") @property @pulumi.getter(name="ipGeolocation") def ip_geolocation(self) -> Optional[str]: """ Whether this action is `"on"` or `"off"`. """ return pulumi.get(self, "ip_geolocation") @property @pulumi.getter def minifies(self) -> Optional[Sequence['outputs.PageRuleActionsMinify']]: """ The configuration for HTML, CSS and JS minification. See below for full list of options. """ return pulumi.get(self, "minifies") @property @pulumi.getter def mirage(self) -> Optional[str]: """ Whether this action is `"on"` or `"off"`. """ return pulumi.get(self, "mirage") @property @pulumi.getter(name="opportunisticEncryption") def opportunistic_encryption(self) -> Optional[str]: """ Whether this action is `"on"` or `"off"`. """ return pulumi.get(self, "opportunistic_encryption") @property @pulumi.getter(name="originErrorPagePassThru") def origin_error_page_pass_thru(self) -> Optional[str]: """ Whether this action is `"on"` or `"off"`. """ return pulumi.get(self, "origin_error_page_pass_thru") @property @pulumi.getter def polish(self) -> Optional[str]: """ Whether this action is `"off"`, `"lossless"` or `"lossy"`. """ return pulumi.get(self, "polish") @property @pulumi.getter(name="resolveOverride") def resolve_override(self) -> Optional[str]: """ Overridden origin server name. """ return pulumi.get(self, "resolve_override") @property @pulumi.getter(name="respectStrongEtag") def respect_strong_etag(self) -> Optional[str]: """ Whether this action is `"on"` or `"off"`. """ return pulumi.get(self, "respect_strong_etag") @property @pulumi.getter(name="responseBuffering") def response_buffering(self) -> Optional[str]: """ Whether this action is `"on"` or `"off"`. 
@pulumi.output_type
class PageRuleActionsCacheKeyFields(dict):
    # NOTE(review): this looks like an auto-generated Pulumi output type
    # (dict-backed value object with camelCase key-warning shims) — manual
    # edits here would likely be overwritten on regeneration; confirm before
    # hand-modifying.

    @staticmethod
    def __key_warning(key: str):
        # Warn when code accesses the raw camelCase dict key instead of the
        # snake_case property getter; "queryString" is the only key whose
        # Python name differs in this type.
        suggest = None
        if key == "queryString":
            suggest = "query_string"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in PageRuleActionsCacheKeyFields. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Emit the deprecation-style warning, then defer to plain dict lookup.
        PageRuleActionsCacheKeyFields.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Same warning hook for the non-raising dict accessor.
        PageRuleActionsCacheKeyFields.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 cookie: 'outputs.PageRuleActionsCacheKeyFieldsCookie',
                 header: 'outputs.PageRuleActionsCacheKeyFieldsHeader',
                 host: 'outputs.PageRuleActionsCacheKeyFieldsHost',
                 query_string: 'outputs.PageRuleActionsCacheKeyFieldsQueryString',
                 user: 'outputs.PageRuleActionsCacheKeyFieldsUser'):
        """
        :param 'PageRuleActionsCacheKeyFieldsCookieArgs' cookie: Controls what cookies go into Cache Key:
        :param 'PageRuleActionsCacheKeyFieldsHeaderArgs' header: Controls what HTTP headers go into Cache Key:
        :param 'PageRuleActionsCacheKeyFieldsHostArgs' host: Controls which Host header goes into Cache Key:
        :param 'PageRuleActionsCacheKeyFieldsQueryStringArgs' query_string: Controls which URL query string parameters go into the Cache Key.
        :param 'PageRuleActionsCacheKeyFieldsUserArgs' user: Controls which end user-related features go into the Cache Key.
        """
        pulumi.set(__self__, "cookie", cookie)
        pulumi.set(__self__, "header", header)
        pulumi.set(__self__, "host", host)
        pulumi.set(__self__, "query_string", query_string)
        pulumi.set(__self__, "user", user)

    @property
    @pulumi.getter
    def cookie(self) -> 'outputs.PageRuleActionsCacheKeyFieldsCookie':
        """
        Controls what cookies go into Cache Key:
        """
        return pulumi.get(self, "cookie")

    @property
    @pulumi.getter
    def header(self) -> 'outputs.PageRuleActionsCacheKeyFieldsHeader':
        """
        Controls what HTTP headers go into Cache Key:
        """
        return pulumi.get(self, "header")

    @property
    @pulumi.getter
    def host(self) -> 'outputs.PageRuleActionsCacheKeyFieldsHost':
        """
        Controls which Host header goes into Cache Key:
        """
        return pulumi.get(self, "host")

    @property
    @pulumi.getter(name="queryString")
    def query_string(self) -> 'outputs.PageRuleActionsCacheKeyFieldsQueryString':
        """
        Controls which URL query string parameters go into the Cache Key.
        """
        return pulumi.get(self, "query_string")

    @property
    @pulumi.getter
    def user(self) -> 'outputs.PageRuleActionsCacheKeyFieldsUser':
        """
        Controls which end user-related features go into the Cache Key.
        """
        return pulumi.get(self, "user")
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: PageRuleActionsCacheKeyFieldsCookie.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: PageRuleActionsCacheKeyFieldsCookie.__key_warning(key) return super().get(key, default) def __init__(__self__, *, check_presences: Optional[Sequence[str]] = None, includes: Optional[Sequence[str]] = None): """ :param Sequence[str] check_presences: Check for presence of specified HTTP headers, without including their actual values. :param Sequence[str] includes: Only use values of specified query string parameters in Cache Key. """ if check_presences is not None: pulumi.set(__self__, "check_presences", check_presences) if includes is not None: pulumi.set(__self__, "includes", includes) @property @pulumi.getter(name="checkPresences") def check_presences(self) -> Optional[Sequence[str]]: """ Check for presence of specified HTTP headers, without including their actual values. """ return pulumi.get(self, "check_presences") @property @pulumi.getter def includes(self) -> Optional[Sequence[str]]: """ Only use values of specified query string parameters in Cache Key. """ return pulumi.get(self, "includes") @pulumi.output_type class PageRuleActionsCacheKeyFieldsHeader(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "checkPresences":
debug myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()") # force_change_account_currency.py ask=MyPopUpDialogBox(toolbox_frame_, theStatus="Are you sure you want to FORCE change an Account's Currency?", theTitle="FORCE CHANGE CURRENCY", theMessage="This is normally a BAD idea, unless you know you want to do it....!\n" "The typical scenario is where you have duplicated Currencies and you want to move\n" "transactions from one account to another, but the system prevents you unless they are the same currency\n" "This fix will NOT attempt to correct any transactions or fx rates etc... It simply changes the currency\n" "set on the account to the new currency. You should carefully review your data afterwards and revert\n" "to a backup if you are not happy with the results....\n" "\n", lCancelButton=True, OKButtonText="I AGREE - PROCEED", lAlertLevel=2) if not ask.go(): statusLabel.setText(("User did not say yes to FORCE change an Account's currency - no changes made").ljust(800, " ")) statusLabel.setForeground(Color.BLUE) myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE) return del ask currencies=[] book = moneydance.getCurrentAccountBook() allCurrencies = book.getCurrencies().getAllCurrencies() for c in allCurrencies: if c.getCurrencyType() == CurrencyType.Type.CURRENCY: # noqa currencies.append(c) currencies = sorted(currencies, key=lambda sort_x: (sort_x.getName().upper())) accounts = AccountUtil.allMatchesForSearch(moneydance_data, MyAcctFilter(5)) accounts = sorted(accounts, key=lambda sort_x: (sort_x.getAccountType(), sort_x.getFullAccountName().upper())) newAccounts = [] for acct in accounts: newAccounts.append(StoreAccountList(acct)) selectedAccount = JOptionPane.showInputDialog(toolbox_frame_, "Select the Account to FORCE change currency", "FORCE CHANGE ACCOUNT's CURRENCY", JOptionPane.WARNING_MESSAGE, None, newAccounts, None) # type: StoreAccountList if not selectedAccount: 
statusLabel.setText(("User did not Select an Account to FORCE change currency - no changes made").ljust(800, " ")) statusLabel.setForeground(Color.BLUE) myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE) return selectedAccount = selectedAccount.obj # type: Account # noinspection PyUnresolvedReferences currencies.remove(selectedAccount.getCurrencyType()) selectedCurrency = JOptionPane.showInputDialog(toolbox_frame_, "Old Currency: %s >> Select the new currency for the account" %(selectedAccount.getCurrencyType()), # noqa "FORCE CHANGE ACCOUNT's CURRENCY", JOptionPane.ERROR_MESSAGE, None, currencies, None) # type: CurrencyType if not selectedCurrency: statusLabel.setText(("User did not Select an new currency for Account FORCE change - no changes made").ljust(800, " ")) statusLabel.setForeground(Color.BLUE) myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE) return ask=MyPopUpDialogBox(toolbox_frame_, theStatus="Are you sure you want to FORCE change this Account's Currency?", theTitle="FORCE CHANGE CURRENCY", theMessage="Account: %s\n" "Old Currency: %s\n" "New Currency: %s\n" %(selectedAccount.getFullAccountName(), selectedAccount.getCurrencyType(),selectedCurrency), # noqa lCancelButton=True, OKButtonText="I AGREE - PROCEED", lAlertLevel=2) if not ask.go(): statusLabel.setText(("User aborted the FORCE change to an Account's currency - no changes made").ljust(800, " ")) statusLabel.setForeground(Color.RED) myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE) return if not confirm_backup_confirm_disclaimer(toolbox_frame_, statusLabel, "FORCE CHANGE CURRENCY", "FORCE CHANGE ACCOUNT %s CURRENCY" %(selectedAccount.getFullAccountName())): # noqa return myPrint("B","@@ User requested to Force Change the Currency of Account: %s from: %s to %s - APPLYING UPDATE NOW...." 
            %(selectedAccount.getFullAccountName(),selectedAccount.getCurrencyType(),selectedCurrency)) # noqa

    # Suspend balance recalculation and UI refresh while the currency is swapped.
    moneydance_data.setRecalcBalances(False)
    moneydance_ui.setSuspendRefresh(True)

    selectedAccount.setCurrencyType(selectedCurrency) # noqa
    selectedAccount.syncItem() # noqa

    moneydance_ui.getMain().saveCurrentAccount()

    moneydance_data.setRecalcBalances(True)
    moneydance_ui.setSuspendRefresh(False)

    # Notify listeners from the root so dependent views/balances repaint.
    root = moneydance.getRootAccount()
    moneydance_data.notifyAccountModified(root)

    statusLabel.setText(("The Account: %s has been changed to Currency: %s- PLEASE REVIEW" %(selectedAccount.getAccountName(),selectedAccount.getCurrencyType())).ljust(800, " ")) # noqa
    statusLabel.setForeground(Color.RED)
    play_the_money_sound()
    myPopupInformationBox(toolbox_frame_,"The Account: %s has been changed to Currency: %s - PLEASE RESTART MD & REVIEW" %(selectedAccount.getAccountName(),selectedAccount.getCurrencyType()),theMessageType=JOptionPane.ERROR_MESSAGE) # noqa

    myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
    return

def reverse_txn_amounts(statusLabel):
    # Negate (flip +/- sign of) every transaction amount on one user-selected
    # account between two user-selected dates. Destructive: user must confirm
    # twice and acknowledge the backup disclaimer before any change is written.
    global toolbox_frame_, debug

    myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")

    # reverse_txn_amounts.py
    ask=MyPopUpDialogBox(toolbox_frame_,
                         theStatus="Are you sure you want to REVERSE Transaction amounts on an Account's Transactions (between two dates)?",
                         theTitle="REVERSE TRANSACTIONAL AMOUNTS",
                         theMessage="This is normally a BAD idea, unless you know you want to do it....!\n"
                                    "The typical scenario is where you perhaps imported transactions with the wrong +/- sign\n"
                                    "..or perhaps you have changed an account's type\n"
                                    "This fix will not touch the ROOT account nor Investment/Security sub-accounts (which are stocks/shares)\n"
                                    "You should carefully review your data afterwards and revert to a backup if you are not happy with the results....",
                         lCancelButton=True, OKButtonText="I AGREE - PROCEED", lAlertLevel=2)

    if not ask.go():
        statusLabel.setText(("User did not say yes to REVERSE TXN AMOUNTS - no changes made").ljust(800, " "))
        statusLabel.setForeground(Color.BLUE)
        myPopupInformationBox(toolbox_frame_,"User did not say yes to REVERSE TXN AMOUNTS - NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
        return

    del ask

    # MyAcctFilter(20) selects the candidate accounts (excludes ROOT and
    # Investment/Security sub-accounts per the warning text above).
    accounts = AccountUtil.allMatchesForSearch(moneydance_data, MyAcctFilter(20))
    accounts = sorted(accounts, key=lambda sort_x: (sort_x.getAccountType(), sort_x.getFullAccountName().upper()))

    newAccounts = []
    for acct in accounts:
        newAccounts.append(StoreAccountList(acct))

    selectedAccount = JOptionPane.showInputDialog(toolbox_frame_,
                                                  "Select the Account to REVERSE Transactional Amounts",
                                                  "REVERSE ACCOUNT's TXN AMOUNTS",
                                                  JOptionPane.WARNING_MESSAGE,
                                                  None,
                                                  newAccounts,
                                                  None)     # type: StoreAccountList
    if not selectedAccount:
        statusLabel.setText(("User did not Select an Account to REVERSE Transactional Amounts - no changes made").ljust(800, " "))
        statusLabel.setForeground(Color.BLUE)
        myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
        return

    # Unwrap the display wrapper to get the real Account object.
    selectedAccount = selectedAccount.obj        # type: Account

    dateField = JDateField(moneydance_ui)

    if not JOptionPane.showConfirmDialog(toolbox_frame_, dateField, "Select Starting Date for reverse", JOptionPane.OK_CANCEL_OPTION)==JOptionPane.OK_OPTION:
        statusLabel.setText(("User did not select start date - no changes made").ljust(800, " "))
        statusLabel.setForeground(Color.BLUE)
        myPopupInformationBox(toolbox_frame_,"User did not select start date - NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
        return

    startDate = dateField.getDateInt()

    dateField.gotoToday()

    if not JOptionPane.showConfirmDialog(toolbox_frame_, dateField, "Select Ending Date for reverse", JOptionPane.OK_CANCEL_OPTION)==JOptionPane.OK_OPTION:
        statusLabel.setText(("User did not select end date - no changes made").ljust(800, " "))
        statusLabel.setForeground(Color.BLUE)
        myPopupInformationBox(toolbox_frame_,"User did not select end date - NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
        return

    endDate = dateField.getDateInt()

    # First pass: count matching txns only, so we can confirm with the user
    # before touching anything.
    txnSet = moneydance_data.getTransactionSet()
    txns = txnSet.iterableTxns()
    iTxnsFound = 0
    for txn in txns:
        if txn.getDateInt() < startDate: continue
        if txn.getDateInt() > endDate: continue
        acct = txn.getAccount()
        if not acct == selectedAccount: continue
        iTxnsFound += 1

    if iTxnsFound < 1:
        statusLabel.setText(("REVERSE TXN AMOUNTS - Sorry - no transactions found - NO CHANGES MADE").ljust(800, " "))
        statusLabel.setForeground(Color.BLUE)
        myPopupInformationBox(toolbox_frame_,"REVERSE TXN AMOUNTS - Sorry - no transactions found - NO CHANGES MADE",theMessageType=JOptionPane.WARNING_MESSAGE)
        return

    if not confirm_backup_confirm_disclaimer(toolbox_frame_, statusLabel, "REVERSE ACCT TXN AMOUNTS", "ACCOUNT %s - REVERSE %s Txns' amounts between %s - %s?" %(selectedAccount,iTxnsFound,startDate,endDate)):
        return

    myPrint("B","@@ User requested to REVERSE the (%s) Txn Amounts on Account %s between %s to %s - APPLYING UPDATE NOW...." %(iTxnsFound, selectedAccount, startDate, endDate))

    # Second pass: actually negate. Refresh/recalc suspended for the batch.
    moneydance_data.setRecalcBalances(False)
    moneydance_ui.setSuspendRefresh(True)

    for txn in txns:
        if txn.getDateInt() < startDate: continue
        if txn.getDateInt() > endDate: continue
        acct = txn.getAccount()
        if not acct == selectedAccount: continue

        myPrint("B","Reversing the amount on %s" %(txn))

        ptxn = txn.getParentTxn()
        ptxn.setEditingMode()

        if ptxn == txn:
            # this is the parent part of the txn
            myPrint("B", " - is a parent, changing each split")
            for splitIdx in range(0, txn.getSplitCount()):
                txn.getSplit(splitIdx).negateAmount()
        else:
            myPrint("B", " - is a split")
            txn.negateAmount()

        # Always sync via the parent so the whole txn record is saved.
        ptxn.syncItem()

    moneydance_ui.getMain().saveCurrentAccount()

    moneydance_data.setRecalcBalances(True)
    moneydance_ui.setSuspendRefresh(False)
    # This does this too: book.notifyAccountModified(root)

    statusLabel.setText(("REVERSE %s Txns Amounts on Account %s between %s - %s COMPLETED - PLEASE REVIEW" %(iTxnsFound,selectedAccount,startDate, endDate)).ljust(800, " "))
    statusLabel.setForeground(Color.RED)
    myPrint("B", "REVERSE %s Txns Amounts on Account %s between %s - %s COMPLETED - PLEASE REVIEW" %(iTxnsFound,selectedAccount,startDate, endDate))
    play_the_money_sound()
    myPopupInformationBox(toolbox_frame_, "REVERSE %s Txns Amounts on Account %s between %s - %s COMPLETED - PLEASE REVIEW" %(iTxnsFound,selectedAccount,startDate, endDate), theMessageType=JOptionPane.ERROR_MESSAGE)

    myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
    return

def reverse_txn_exchange_rates_by_account_and_date(statusLabel):
    # Reverse (invert) the fx rates on a chosen account's transactions between
    # two dates. NOTE: currency price history itself is untouched (see dialog text).
    global toolbox_frame_, debug

    myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")

    # reverse_txn_exchange_rates_by_account_and_date.py
    ask=MyPopUpDialogBox(toolbox_frame_,
                         theStatus="Are you sure you want to REVERSE Exchange Rates on an Account's Transactions (between two dates)?",
                         theTitle="REVERSE TRANSACTIONAL EXCHANGE RATES",
                         theMessage="This is normally a BAD idea, unless you know you want to do it....!\n"
                                    "The typical scenario is where you perhaps imported transactions with the fx rates inversed \n"
                                    "This fix will not touch the Currency price history...!\n"
                                    "This fix will not touch the ROOT account nor Investment/Security sub-accounts (which are stocks/shares)\n"
                                    "You should carefully review your data afterwards and revert to a backup if you are not happy with the results....",
                         lCancelButton=True, OKButtonText="I AGREE - PROCEED", lAlertLevel=2)

    if not ask.go():
        statusLabel.setText(("User did not say yes to REVERSE TXN EXCHANGE RATES - no changes made").ljust(800, " "))
        statusLabel.setForeground(Color.BLUE)
        myPopupInformationBox(toolbox_frame_,"User did not say yes to REVERSE TXN EXCHANGE RATES - NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
        return

    del ask

    accounts = AccountUtil.allMatchesForSearch(moneydance_data, MyAcctFilter(20))
    accounts = sorted(accounts, key=lambda sort_x: (sort_x.getAccountType(), sort_x.getFullAccountName().upper()))

    newAccounts = []
    for acct in accounts:
newAccounts.append(StoreAccountList(acct)) selectedAccount = JOptionPane.showInputDialog(toolbox_frame_, "Select the Account to REVERSE Transactional Exchange Rates", "REVERSE ACCOUNT's TXN EXCHANGE RATES", JOptionPane.WARNING_MESSAGE, None, newAccounts, None) # type: StoreAccountList if not selectedAccount: statusLabel.setText(("User did not select an Account to REVERSE Transactional Exchange Rates - no changes made").ljust(800, " ")) statusLabel.setForeground(Color.BLUE) myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE) return selectedAccount = selectedAccount.obj # type: Account dateField = JDateField(moneydance_ui) if not JOptionPane.showConfirmDialog(toolbox_frame_, dateField, "Select STARTING Date for reverse", JOptionPane.OK_CANCEL_OPTION)==JOptionPane.OK_OPTION: statusLabel.setText(("User did not select start date - no changes made").ljust(800, " ")) statusLabel.setForeground(Color.BLUE) myPopupInformationBox(toolbox_frame_,"User did not select start date - NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE) return startDate = dateField.getDateInt() dateField.gotoToday() if not JOptionPane.showConfirmDialog(toolbox_frame_, dateField, "Select ENDING Date for reverse", JOptionPane.OK_CANCEL_OPTION)==JOptionPane.OK_OPTION: statusLabel.setText(("User did not select end date - no changes
""" path = to_bytes_or_null(path) plaintext = to_bytes_or_null(plaintext) ciphertext = ffi.new("uint8_t **") ciphertext_size = ffi.new("size_t *") ret = lib.Fapi_Encrypt( self.ctx, path, plaintext, len(plaintext), ciphertext, ciphertext_size ) if ret == lib.TPM2_RC_SUCCESS: result = bytes(ffi.unpack(ciphertext[0], ciphertext_size[0])) lib.Fapi_Free(ciphertext[0]) return result raise TSS2_Exception(ret) def decrypt(self, path: Union[bytes, str], ciphertext: bytes) -> bytes: """Decrypt the ciphertext and return the plaintext. Args: path (bytes or str): The decrypt key used for decryption. ciphertext (bytes or str): The data to be decrypted. Raises: TSS2_Exception: If Fapi returned an error code. Returns: bytes: The plaintext. """ path = to_bytes_or_null(path) plaintext = ffi.new("uint8_t **") plaintext_size = ffi.new("size_t *") ret = lib.Fapi_Decrypt( self.ctx, path, ciphertext, len(ciphertext), plaintext, plaintext_size ) if ret == lib.TPM2_RC_SUCCESS: result = bytes(ffi.unpack(plaintext[0], plaintext_size[0])) lib.Fapi_Free(plaintext[0]) return result raise TSS2_Exception(ret) def create_seal( self, path: Union[bytes, str], data: Optional[Union[bytes, str]] = None, type: Optional[Union[bytes, str]] = None, policy_path: Optional[Union[bytes, str]] = None, auth_value: Optional[Union[bytes, str]] = None, exists_ok: bool = False, ) -> bool: """Create a Fapi sealed (= encrypted) object, that is data sealed a Fapi parent key. Oftentimes, the data is a digest. Args: path (bytes or str): The path of the new sealed object. data (bytes or str, optional): Data to be sealed (often a digest). If None, random data will be generated. Defaults to None. type (bytes or str, optional): Comma separated list. Possible values: system, sign, decrypt, restricted, exportable, noda, 0x81000000. Defaults to None. policy_path (bytes or str, optional): The path to the policy which will be associated with the sealed object. Defaults to None. 
auth_value (bytes or str, optional): Password to protect the new sealed object. Defaults to None. exists_ok (bool, optional): Do not throw a TSS2_Exception if an object with the given path already exists. Defaults to False. Raises: TSS2_Exception: If Fapi returned an error code. Returns: bool: True if the sealed object was created. False otherwise. """ # TODO if data is none, user should be able to give a size (of the random data) path = to_bytes_or_null(path) data = to_bytes_or_null(data) type = to_bytes_or_null(type) policy_path = to_bytes_or_null(policy_path) auth_value = to_bytes_or_null(auth_value) ret = lib.Fapi_CreateSeal( self.ctx, path, type, len(data), policy_path, auth_value, data ) if ret == lib.TPM2_RC_SUCCESS: return True if exists_ok and ret == lib.TSS2_FAPI_RC_PATH_ALREADY_EXISTS: return False raise TSS2_Exception(ret) def unseal(self, path: Union[bytes, str]) -> bytes: """Unseal a sealed (= encrypted) Fapi object and return the data in plaintext. Args: path (Union[bytes, str]): The path to the sealed object. Raises: TSS2_Exception: If Fapi returned an error code. Returns: bytes: The unsealed data in plaintext. """ path = to_bytes_or_null(path) data = ffi.new("uint8_t **") data_size = ffi.new("size_t *") ret = lib.Fapi_Unseal(self.ctx, path, data, data_size) if ret == lib.TPM2_RC_SUCCESS: result = bytes(ffi.unpack(data[0], data_size[0])) lib.Fapi_Free(data[0]) return result raise TSS2_Exception(ret) def import_object( self, path: Union[bytes, str], import_data: Union[bytes, str], exists_ok: bool = False, ) -> bool: """Import policy, policy template or key into the keystore. Args: path (bytes or str): Path of the future Fapi object. import_data (bytes or str): JSON-encoded data to import. exists_ok (bool, optional): Do not throw a TSS2_Exception if an object with the given path already exists. Defaults to False. Raises: TSS2_Exception: If Fapi returned an error code. Returns: bool: True if the object was imported. False otherwise. 
""" path = to_bytes_or_null(path) import_data = to_bytes_or_null(import_data) ret = lib.Fapi_Import(self.ctx, path, import_data) if ret == lib.TPM2_RC_SUCCESS: return True if exists_ok and ret == lib.TSS2_FAPI_RC_PATH_ALREADY_EXISTS: return False raise TSS2_Exception(ret) def delete(self, path: Union[bytes, str]) -> None: """Delete Fapi object. Args: path (bytes or str): Path to the Fapi object to delete. Raises: TSS2_Exception: If Fapi returned an error code. """ path = to_bytes_or_null(path) ret = lib.Fapi_Delete(self.ctx, path) if ret == lib.TPM2_RC_SUCCESS: return raise TSS2_Exception(ret) def change_auth( self, path: Union[bytes, str], auth_value: Optional[Union[bytes, str]] = None ) -> None: """Change the password to a Fapi object. Args: path (bytes or str): Path to the Fapi object. auth_value (bytes or str, optional): New password. Defaults to None. Raises: TSS2_Exception: If Fapi returned an error code. """ path = to_bytes_or_null(path) auth_value = to_bytes_or_null(auth_value) ret = lib.Fapi_ChangeAuth(self.ctx, path, auth_value) if ret == lib.TPM2_RC_SUCCESS: return raise TSS2_Exception(ret) def export_key( self, path: Union[bytes, str], new_path: Union[bytes, str] = None ) -> str: """Export a Fapi object as a JSON-encoded string. Args: path (bytes or str): Path to the existing Fapi object. new_path (bytes or str, optional): New path to the Fapi object. Defaults to None. Raises: TSS2_Exception: If Fapi returned an error code. Returns: str: The exported data. """ path = to_bytes_or_null(path) new_path = to_bytes_or_null(new_path) exported_data = ffi.new("char **") ret = lib.Fapi_ExportKey(self.ctx, path, new_path, exported_data) if ret == lib.TPM2_RC_SUCCESS: result = ffi.string(exported_data[0]).decode(self.encoding) lib.Fapi_Free(exported_data[0]) return result raise TSS2_Exception(ret) def set_description( self, path: Union[bytes, str], description: Optional[Union[bytes, str]] = None ) -> None: """Set the description of a Fapi object. 
Args: path (bytes or str): Path to the Fapi object. description (bytes or str, optional): New description of the Fapi object. Defaults to None. Raises: TSS2_Exception: If Fapi returned an error code. """ path = to_bytes_or_null(path) description = to_bytes_or_null(description) ret = lib.Fapi_SetDescription(self.ctx, path, description) if ret == lib.TPM2_RC_SUCCESS: return raise TSS2_Exception(ret) def get_description(self, path: Union[bytes, str] = None) -> str: """Get the description of a Fapi object. Args: path (bytes or str): Path to the Fapi object. Raises: TSS2_Exception: If Fapi returned an error code. Returns: str: The description of the Fapi object. """ path = to_bytes_or_null(path) description = ffi.new("char **") ret = lib.Fapi_GetDescription(self.ctx, path, description) if ret == lib.TPM2_RC_SUCCESS: # description is guaranteed to be a null-terminated string result = ffi.string(description[0]).decode() lib.Fapi_Free(description[0]) return result raise TSS2_Exception(ret) def set_app_data( self, path: Union[bytes, str], app_data: Optional[Union[bytes, str]] = None ) -> None: """Add custom application data to a Fapi object. This data is saved alongside the object and can be used by the application. Args: path (bytes or str): Path to the Fapi object. app_data (bytes or str, optional): Custom application data to be associated with the Fapi object. Defaults to None. Raises: TSS2_Exception: If Fapi returned an error code. """ path = to_bytes_or_null(path) app_data = to_bytes_or_null(app_data) app_data_size = len(app_data) ret = lib.Fapi_SetAppData(self.ctx, path, app_data, app_data_size) if ret == lib.TPM2_RC_SUCCESS: return raise TSS2_Exception(ret) def get_app_data(self, path: Union[bytes, str]) -> Optional[bytes]: """Get the custom application data of a Fapi object. Args: path (bytes or str): Path to the Fapi object. Raises: TSS2_Exception: If Fapi returned an error code. Returns: Optional[bytes]: The application data or None. 
""" path = to_bytes_or_null(path) app_data = ffi.new("uint8_t **") app_data_size = ffi.new("size_t *") ret = lib.Fapi_GetAppData(self.ctx, path, app_data, app_data_size) if ret == lib.TPM2_RC_SUCCESS: if app_data[0] == ffi.NULL: result = None else: result = bytes(ffi.unpack(app_data[0], app_data_size[0])) lib.Fapi_Free(app_data[0]) return result raise TSS2_Exception(ret) def set_certificate( self, path: Union[bytes, str], certificate: Optional[Union[bytes, str]] = None ) -> None: """Add x509 certificate to a Fapi object. This data is saved alongside the object and can be used by the application. Args: path (bytes or str): Path to the Fapi object. certificate (bytes or str, optional): x509 certificate to be associated with the Fapi object. Defaults to None. Raises: TSS2_Exception: If Fapi returned an error code. """ path = to_bytes_or_null(path) certificate = to_bytes_or_null(certificate) ret = lib.Fapi_SetCertificate(self.ctx, path, certificate) if ret == lib.TPM2_RC_SUCCESS: return raise TSS2_Exception(ret) def get_certificate(self, path: Union[bytes, str]) -> str: """Get the custom application data of a Fapi object. Args: path (bytes or str): Path to the Fapi object. Raises: TSS2_Exception: If Fapi returned an error code. Returns: bytes: The application data. """ path = to_bytes_or_null(path) certificate = ffi.new("char **") ret = lib.Fapi_GetCertificate(self.ctx, path, certificate) if ret == lib.TPM2_RC_SUCCESS: # certificate is guaranteed to be a null-terminated string result = ffi.string(certificate[0]).decode() lib.Fapi_Free(certificate[0]) return result raise TSS2_Exception(ret) def get_platform_certificates(self, no_cert_ok: bool = False) -> bytes: # TODO doc # TODO split certificates into list # TODO why bytes? is this DER? certificate = ffi.new("uint8_t **") certificates_size = ffi.new("size_t *") ret = lib.Fapi_GetPlatformCertificates(self.ctx, certificate, certificates_size) if ret == lib.TPM2_RC_SUCCESS: result
    of pre-segmentation of RMieS-corrected spectra and pathologist annotation.
    With regard to the learning behaviour of the deep neural networks, it could be shown
    that no new classifier has to be built but that the existing networks can be used in
    transfer learning for a variety of applications, while the number of false positives
    could be significantly reduced. [2]

    The data from groundtruth was processed under the following conditions:

        - Agilent Resolution Pro Software.
        - Fourier Transformation using Merz phase correction.
        - Blackman-Harris-4-term apodization and zero filling of 2.

    Specifications for own use:

    The spectral data must be available as a 2d-numpy array which is structured as follows:

        x_data = x_axis*y_axis, z_axis

    It is important for the application to observe the number of data points on the z-axis.

    The classification ( dl.net(x_data,classify=True) ) of the individual RMieS-uncorrected
    spectra (semantic segmentation) is carried out on the first 450 wavenumbers between
    950 and 1800 cm^-1.

    The correction ( dl.net(x_data, miecorr=True) ) of the raw data is done on the first
    909 wavenumbers between 950 and 2300 cm^-1.

    Examples:

        import openvibspec.ml_ftir as ovml
        dl = ovml.DeepLearn()
        x_pred, model = dl.net(x_data[:,:450],classify=True)
        x_corr, model = dl.net(x_data[:,:909], miecorr=True)

    Args:
        x_data(numpy array):
        classify=False(str): if True it uses the entered data (x_data) to predict the
            previously learned 19 classes on uncorrected FTIR spectra of human colon tissue
        miecorr=False(str): if True it uses the entered data (x_data) to predict the
            regression of the RMieS-Correction Function based on Bassan

    References:
        [2] Classification of (RMieS) uncorrected FTIR spectra with deep neural networks.
            https://academic.oup.com/bioinformatics/article-abstract/36/1/287/5521621
        [3] Deep neural networks for the correction of RMie Scattering in FTIR data.
            https://arxiv.org/abs/2002.07681

    #--------------------------------------------------------------------------------------------------------------------------------------------------------------------

    DeepLearn.transfer()

    The transfer function is based on using the data representations discovered by the
    existing networks for faster learning on new data. For example, the networks trained
    on ffpe can be used to create classification networks for other tissues and their
    laboratory preparation with significantly less data.

    For further information regarding the theoretical part of this procedure, please see
    reference [2].

    Besides the spectral data, a groundtruth as label is needed for the transfer learning.

    Models and weights are automatically saved in the working directory in *h5 and *json
    format using the following naming convention:

        model_ptMLP_MieReg_%d-%m-%Y_%I-%M-%S_%p

    Examples:

        import openvibspec.ml_ftir as ovml
        dl = ovml.DeepLearn()

        dl.transfer(x_data[:5,:909],y_data, batch=10, train_epochs=10, miecorr=True, trainable=False)

        dl.transfer(x_data[:5,:909],x_data_corrected[:5,:909], batch=10, train_epochs=10, miecorr=True, trainable=False)

    Args:
        x_data(numpy array): 2D array shape(x_axis*y_axis, z_axis)
        y_data(numpy array): label vector with classes assigned as numbers from 1 to n
        batch(int): number of examples per batch
        train_epochs(int): number of iterations per training
        add_l(list of int()): possible list for adding layers
        classify=True(str): classification mode
        miecorr=True(str): regression mode
        trainable=False(str): if trainable=True: allows the adjustment of the already
            loaded weights from the pretrained networks

    #--------------------------------------------------------------------------------------------------------------------------------------------------------------------

    DeepLearn.load_and_predict()

    This function allows to load and use the trained network which was saved under
    DeepLearn.transfer()

    Examples:

        import openvibspec.ml_ftir as ovml
        dl =
    ovml.DeepLearn()

        a = dl.load_and_predict(x_new_data[:,:450],'model_ptMLP_class_DATE_TIME')

    Args:
        x_new_data(numpy array): 2D array shape(x_axis*y_axis, z_axis)
        model_ptMLP_class_DATE_TIME(str): model_ptMLP_MieReg_* or model_ptMLP_class_*

    """

    def net(
        self, x, classify=False, miecorr=False, predict=False, train=False, show=False
    ):
        # Load one of the two pretrained MLPs shipped under MODELPATH and run it
        # on the (L2-normalized) spectra in x. Exactly one of classify/miecorr
        # should be True; `show` prints the model summary before prediction.
        import keras
        from keras.models import model_from_json

        # import tensorflow as tf
        # import tensorflow.compat.v1 as tf
        # tf.disable_v2_behavior()
        """
        ####################################################################################################
        # DETERMINE WHICH MODEL PARAMETERS YOU WANT TO USE
        # CLASSIFY == TRUE GIVES THE MODEL TRAINED TO CLASSIFY ALL CELLULAR COMPONENTS BASED ON SPECTRA
        #             BETWEEN 950-1800 WVN
        #
        # MIECORR == TRUE GIVES THE CORRESPONDING NEURAL NETWORK FOR PERFORMING EFFICIENT RMIE-CORRECTION
        #             ON FFPE-BASED TISSUE SPECTRA
        #
        ####################################################################################################
        """
        #############
        # TODO
        #
        # CURRENTLY THE loaded_model INSTANCE IS EQUIPPED WITH DROPOUT LAYERS
        # SINCE THE ORIGINAL MODEL WAS BASED ON THEANO THEY CAN ONLY BE USED IN A MONTE-CARLO-DROPOUT WAY
        #
        # THIS SHOULD BE IMPLEMENTED AS 2ND CASE
        #
        #############
        if classify == True:
            if x.shape[1] != 450:
                raise ValueError(
                    "This is a classification problem: Your spectral data needs 450 datapoints in WVN range of 950-1800 1/cm"
                )

            json_file = open(
                os.path.join(str(MODELPATH) + "/model_weights_classification.json"), "r"
            )
            loaded_model_json = json_file.read()
            loaded_model = model_from_json(loaded_model_json)

            if show == True:
                print(loaded_model.summary())

            loaded_model.load_weights(
                os.path.join(str(MODELPATH) + "/model_weights_classification.best.hdf5")
            )
            print("Loaded model from disk")

            # Rebuild the network WITHOUT the dropout layers (the even-indexed
            # layers are skipped) for deterministic inference.
            model2 = keras.Sequential(
                [
                    loaded_model.layers[0],
                    loaded_model.layers[1],
                    loaded_model.layers[3],
                    loaded_model.layers[5],
                    loaded_model.layers[7],
                    loaded_model.layers[9],
                    loaded_model.layers[11],
                    loaded_model.layers[13],
                ]
            )
            # model = loaded_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

            from sklearn.preprocessing import normalize

            trX = normalize(x, axis=1, norm="l2")

            return model2.predict(trX)

        if miecorr == True:
            if x.shape[1] != 909:
                raise ValueError(
                    "This is a regression problem: Your spectral data needs 909 datapoints in WVN range of 950-2300 1/cm"
                )
            ####################################################################################################
            # THIS MODEL NEEDS THE FIRST 909 WVN. RANGE FROM 950-2300 WVN 1/cm
            #
            #
            ####################################################################################################x
            json_file = open(
                os.path.join(str(MODELPATH) + "/model_weights_regression.json"), "r"
            )
            loaded_model_json = json_file.read()
            loaded_model = model_from_json(loaded_model_json)

            if show == True:
                print(loaded_model.summary())

            loaded_model.load_weights(
                os.path.join(str(MODELPATH) + "/model_weights_regression.best.hdf5")
            )
            print("Loaded model from disk")

            loaded_model.compile(loss="mean_squared_error", optimizer="adam")

            from sklearn.preprocessing import normalize

            trX = normalize(x, axis=1, norm="l2")

            return loaded_model.predict(trX)

    def transfer(
        self,
        x,
        y,
        batch,
        train_epochs,
        add_l=[],
        classify=False,
        miecorr=False,
        trainable=False,
    ):
        # Transfer-learn from the shipped pretrained networks: classification
        # (add_layer) or Mie-correction regression (train_layer). The fitted
        # model and weights are written to the working directory.
        # NOTE(review): add_l=[] is a mutable default argument - it is only
        # read here, but confirm callers never mutate it.
        import keras
        from keras.models import model_from_json
        from keras.models import Sequential
        from datetime import datetime
        from sklearn.preprocessing import normalize

        """
        ALL PARTS OF THE TRANSFER-LEARNING NETWORKS ON FTIR SPECTROSCOPIC DATA
        """
        trX = normalize(x, axis=1, norm="l2")

        # def onehot(y):
        #     import keras
        #     from keras.utils import np_utils
        #
        #     c = np.max(y) + 1
        #
        #     y1hot = np_utils.to_categorical(y, num_classes=c)
        #
        #     return(y1hot)

        def add_layer():
            # Classification transfer: reuse the pretrained feature layers and
            # replace the softmax head with one sized to the new label set.
            from keras.layers import Dense
            from keras.models import Model

            # yoh = onehot(y)
            yoh = y

            sm = int(yoh.shape[1])
            print("training on", sm, "classes")

            json_file = open(
                os.path.join(str(MODELPATH) + "/model_weights_classification.json"), "r"
            )
            loaded_model_json = json_file.read()
            loaded_model = model_from_json(loaded_model_json)
            loaded_model.load_weights(
                os.path.join(str(MODELPATH) + "/model_weights_classification.best.hdf5")
            )

            # Freeze (or unfreeze) all pretrained layers as requested.
            if trainable == False:
                for layer in loaded_model.layers:
                    layer.trainable = False
            else:
                for layer in loaded_model.layers:
                    layer.trainable = True

            if not add_l:
                model2 = keras.Sequential(
                    [
                        loaded_model.layers[0],
                        loaded_model.layers[1],
                        loaded_model.layers[3],
                        loaded_model.layers[5],
                        loaded_model.layers[7],
                        loaded_model.layers[9],
                        loaded_model.layers[11],
                        loaded_model.layers[13],
                    ]
                )
                preds = Dense(sm, name="newlast", activation="softmax")(
                    model2.layers[-1].output
                )
                model2 = Model(inputs=model2.input, outputs=preds)
                model2.compile(
                    loss="categorical_crossentropy",
                    optimizer="rmsprop",
                    metrics=["accuracy"],
                )
                history = model2.fit(trX, yoh, batch_size=batch, epochs=train_epochs)
                print(model2.summary())

            if add_l:

                def add_2_model(add_l):
                    # Stack extra Dense layers (sizes from add_l) on top of the
                    # pretrained base, then a fresh softmax output.
                    base = Model(
                        inputs=loaded_model.input,
                        outputs=loaded_model.layers[-1].output,
                    )
                    model = Sequential()
                    model.add(base)
                    model.add(Dense(add_l[0], input_dim=450, activation="relu"))
                    for layer_size in add_l[1:]:
                        model.add(Dense(layer_size, activation="relu"))
                    model.add(Dense(sm, activation="softmax"))
                    return model

                model = add_2_model(add_l)
                model.compile(
                    loss="categorical_crossentropy",
                    optimizer="rmsprop",
                    metrics=["accuracy"],
                )
                history = model.fit(trX, yoh, batch_size=batch, epochs=train_epochs)
                print(model.summary())

            # NOTE(review): when add_l is empty only model2 exists - `model`
            # below looks like it would be unbound in that branch; confirm.
            dtstr = datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")
            model_json = model.to_json()
            with open("model_ptMLP_class_" + dtstr + ".json", "w") as json_file:
                json_file.write(model_json)
            model.save_weights("model_model_ptMLP_class_" + dtstr + ".h5")
            print(
                "Saved model to disk to", "model_model_ptMLP_class_" + dtstr + ".json"
            )
            print("and weights to")
            print("Saved model to disk to", "model_model_ptMLP_class_" + dtstr + ".h5")

            ###########################PLOTTING##########################
            history_dict = history.history
            history_dict.keys()

            a = np.array(history_dict["acc"])
            print(a.shape)
            l = np.array(history_dict["loss"])
            e = range(1, len(a) + 1)

            plt.plot(e, a, "bo", color="red", label="Acc Training")
            plt.plot(e, l, "b", label="Loss Training")
            plt.xlabel("Epochs")
            plt.legend()
            plt.savefig("model.pdf")

            return (model, history_dict)

        def simple_val_of_data(x, y):
            # Convenience 60/40 train/test split with a random (but printed) seed.
            from sklearn.model_selection import train_test_split
            from random import randrange
            from sklearn.preprocessing import normalize

            trX = normalize(x, axis=1, norm="l2")

            seed = randrange(999)
            print("used random seed was", seed)

            x_train, x_test, y_train, y_test = train_test_split(
                trX, y, test_size=0.4, random_state=seed
            )
            return x_train, x_test, y_train, y_test

        def train_layer():
            # Regression transfer: fine-tune (or freeze) the pretrained
            # Mie-correction network on the raw x/y pair and save it.
            sm = int(y.shape[1])

            json_filer = open(
                os.path.join(str(MODELPATH) + "/model_weights_regression.json"), "r"
            )
            loaded_model_jsonr = json_filer.read()
            loaded_modelr = model_from_json(loaded_model_jsonr)
            loaded_modelr.load_weights(
                os.path.join(str(MODELPATH) + "/model_weights_regression.best.hdf5")
            )

            if trainable == False:
                for layer in loaded_modelr.layers:
                    layer.trainable = False
            else:
                for layer in loaded_modelr.layers:
                    layer.trainable = True

            loaded_modelr.compile(loss="mean_squared_error", optimizer="adam")

            history = loaded_modelr.fit(x, y, batch_size=batch, epochs=train_epochs)

            dtstr = datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")

            print(loaded_modelr.summary())

            model_json = loaded_modelr.to_json()
            with open("model_ptMLP_MieReg_" + dtstr + ".json", "w") as json_file:
                json_file.write(model_json)
            loaded_modelr.save_weights("model_model_ptMLP_MieReg_" + dtstr + ".h5")
            print(
                "Saved model to disk to", "model_model_ptMLP_MieReg_" + dtstr + ".json"
            )
            print("and weights to")
            print("Saved model to disk to", "model_model_ptMLP_MieReg_" + dtstr + ".h5")
            return

        if classify == True:
            if x.shape[1] != 450:
                raise ValueError(
                    "This is a classification problem: x needs to be 450 datapoints in WVN range of 950-1800 1/cm"
                )
            mod, h = add_layer()

        if miecorr == True:
            if y.shape[1] != x.shape[1]:
                raise ValueError(
                    "This is a regression problem: x and y need 909 datapoints in WVN range of 950-2300 1/cm"
                )
            train_layer()

    # def gan_ir_upsample(self,lst_class, lst_ir):
    # def gan_ir_upsample(lst_class, lst_ir):
    # def gan_ir_upsample(path_class, path_ir):
    def gan_ir_upsample(self, path_class, path_ir):
        """
        INPUT:
        lst_class: python
import torch
from torch import nn

from model.BaseModules import TransformerDecoderLayer
from model.Encoder import BaseEncoder, AutoEncoder, TemporalConvNet


class Seq2SeqLSTM(BaseEncoder):
    """Plain LSTM encoder/decoder pair.

    The encoder's final (h, c) state seeds the decoder, which is fed the
    target sequence (teacher forcing).
    """

    def __init__(self, source_size, target_size, hidden_size, **kwargs):
        super(Seq2SeqLSTM, self).__init__()
        self.encoder = nn.LSTM(source_size, hidden_size, **kwargs)
        self.decoder = nn.LSTM(target_size, hidden_size, **kwargs)

    def forward(self, src, tgt, **kwargs):
        # Encode the source sequence, then decode the target sequence
        # starting from the encoder's final hidden/cell state.
        enc, enc_hx = self.encoder(src)
        dec, _ = self.decoder(tgt, hx=enc_hx)
        return enc, dec


class Seq2SeqLSTM_new(BaseEncoder):
    """Seq2seq LSTM that needs no target inputs: the decoder consumes the
    encoder's final hidden state repeated ``pred_len`` times."""

    def __init__(self, source_size, target_size, pred_len, hidden_size, **kwargs):
        super(Seq2SeqLSTM_new, self).__init__()
        self.pred_len = pred_len
        self.encoder = nn.LSTM(source_size, hidden_size, **kwargs)
        self.decoder = nn.LSTM(target_size, hidden_size, **kwargs)

    def forward(self, src, **kwargs):
        """
        :param src: (Time, batch*building, State)
        :param kwargs: unused; accepted for interface compatibility
        :return: (encoder outputs, decoder outputs)
        """
        enc, enc_hx = self.encoder(src)
        h_x, _ = enc_hx  # keep final hidden state h; discard cell state c
        dims = h_x.ndim - 1
        # Tile h along a new leading time axis so the decoder sees pred_len steps.
        dec_in = h_x.repeat(self.pred_len, *([1] * dims))
        dec, _ = self.decoder(dec_in, hx=enc_hx)
        return enc, dec


class Seq2SeqAttnEncoder(BaseEncoder):
    """Auto-encodes per-step states, runs a seq2seq LSTM over them, and fuses
    history/forecast summaries via two transformer-decoder attention blocks."""

    def __init__(self, source_size, target_size, hidden_size, target_fn,
                 auto_encoder_kwargs, attn_kwargs, lstm_kwargs=None, **kwargs):
        super(Seq2SeqAttnEncoder, self).__init__()
        if lstm_kwargs is None:
            lstm_kwargs = {}
        # target_fn splits the raw input into (src, tgt) -- supplied by caller.
        self.target_fn = target_fn
        self.auto_encoder = AutoEncoder(source_size, hidden_size, **auto_encoder_kwargs)
        self.seq2seq = Seq2SeqLSTM(hidden_size, target_size, hidden_size, **lstm_kwargs)
        self.HistoryTemporalModule = TransformerDecoderLayer(hidden_size, **attn_kwargs)
        self.ForecastTemporalModule = TransformerDecoderLayer(hidden_size, **attn_kwargs)

    def forward(self, x):
        """
        :param x: the state sequence
        :return: hidden state

        Shape:
            - x: :math:`(Batch, Building, Time, State)`.
            - return: :math:`(Batch, Building, Hidden_State*2)`.
""" def to_seq_first(tensor): tensor = tensor.unsqueeze(0).transpose(0, -2) return tensor.reshape(tensor.size(0), -1, tensor.size(-1)) def undo_seq_first(tensor, lead_dims): tensor.transpose_(0, -2) return tensor.reshape(*lead_dims, *tensor.shape[-2:]) src, tgt = self.target_fn(x) assert src.shape[:-2] == tgt.shape[:-2] h_s = self.auto_encoder(src.reshape(-1, src.size(-1))).reshape(*src.shape[:-1], -1) h_s, tgt = to_seq_first(h_s), to_seq_first(tgt) _, h_t = self.seq2seq(src=h_s, tgt=tgt) h_cur = h_s[[-1]] h_t = self.ForecastTemporalModule(tgt=h_cur, memory=h_t) h_s = self.HistoryTemporalModule(tgt=h_cur, memory=h_s) out = undo_seq_first(torch.cat((h_s, h_t), dim=-1), src.shape[:-2]).squeeze(-2) # out.transpose_(0, 1) # -> (Building, Batch, State) # out = self.BuildingAttnModule(out) # out.transpose_(0, 1) # -> (Batch, Building, State) return out class Seq2SeqTCNEncoder(BaseEncoder): def __init__(self, source_size, target_size, hidden_size, target_fn, auto_encoder_kwargs, unique_kwargs_history, unique_kwargs_forecast, lstm_kwargs=None, **kwargs): super(Seq2SeqTCNEncoder, self).__init__() if lstm_kwargs is None: lstm_kwargs = {} self.target_fn = target_fn self.auto_encoder = AutoEncoder(source_size, hidden_size, **auto_encoder_kwargs) self.seq2seq = Seq2SeqLSTM(hidden_size, target_size, hidden_size, **lstm_kwargs) self.HistoryTemporalModule = TemporalConvNet(hidden_size, hidden_size, **unique_kwargs_history) self.ForecastTemporalModule = TemporalConvNet(hidden_size, hidden_size, **unique_kwargs_forecast) # self.BuildingAttnModule = TransformerEncoderLayer(hidden_size * 2, **attn_kwargs) def forward(self, x): """ :param x: the state sequence :return: hidden state Shape: - x: :math:`(Batch, Building, Time, State)`. - return: :math:`(Batch, Building, Hidden_State*2)`. 
""" def to_seq_first(tensor): tensor = tensor.unsqueeze(0).transpose(0, -2) return tensor.reshape(tensor.size(0), -1, tensor.size(-1)) def undo_seq_first(tensor, lead_dims): tensor.transpose_(0, -2) return tensor.reshape(*lead_dims, *tensor.shape[-2:]) def to_TCN_input(tensor): # tensor: (seq, batch*building, s_dim) tensor = tensor.transpose(1, 0) # (batch*building, seq, s_dim) old_shape = tensor.shape return tensor.reshape((-1, 9, *old_shape[-2:])) def reverse_t_dim(tensor): inv_idx = torch.arange(tensor.size(2) - 1, -1, -1).long().to(tensor.device) # or equivalently torch.range(tensor.size(0)-1, 0, -1).long() inv_tensor = tensor.index_select(2, inv_idx) return inv_tensor src, tgt = self.target_fn(x) assert src.shape[:-2] == tgt.shape[:-2] h_s = self.auto_encoder(src.reshape(-1, src.size(-1))).reshape(*src.shape[:-1], -1) h_s, tgt = to_seq_first(h_s), to_seq_first(tgt) _, h_t = self.seq2seq(src=h_s, tgt=tgt) # h_cur = h_s[[-1]] # h_t = self.ForecastTemporalModule(tgt=h_cur, memory=h_t) # h_s = self.HistoryTemporalModule(tgt=h_cur, memory=h_s) # (seq, batch*building, s_dim) # TCN input: (batch, building, seq, s_dim) h_t = self.ForecastTemporalModule(reverse_t_dim(to_TCN_input(h_t))) # (batch, building, 128) h_s = self.HistoryTemporalModule(to_TCN_input(h_s)) # reverse the forecast sequence on t-dim out = torch.cat((h_s, h_t), dim=-1) # (batch, building, 256) # out.transpose_(0, 1) # -> (Building, Batch, State) # out = self.BuildingAttnModule(out) # out.transpose_(0, 1) # -> (Batch, Building, State) return out class Seq2SeqSymTCNEncoder_old(BaseEncoder): def __init__(self, source_size, target_size, hidden_size, target_fn, pred_len, auto_encoder_kwargs, tcn_kwargs, lstm_kwargs=None, **kwargs): super(Seq2SeqSymTCNEncoder_old, self).__init__() if lstm_kwargs is None: lstm_kwargs = {} self.pred_len = pred_len self.target_fn = target_fn self.auto_encoder = AutoEncoder(source_size, hidden_size, **auto_encoder_kwargs) self.seq2seq = Seq2SeqLSTM(hidden_size, target_size, 
hidden_size, **lstm_kwargs)
        self.TemporalModule = TemporalConvNet(hidden_size, hidden_size, **tcn_kwargs)
        # self.BuildingAttnModule = TransformerEncoderLayer(hidden_size * 2, **attn_kwargs)

    def forward(self, x):
        """
        :param x: the state sequence
        :return: hidden state

        Shape:
            - x: :math:`(Batch, Building, Time, State)`.
            - return: :math:`(Batch, Building, Hidden_State*2)`.
        """
        def to_seq_first(tensor):
            # (Batch, Building, Time, F) -> (Time, Batch*Building, F)
            tensor = tensor.unsqueeze(0).transpose(0, -2)
            return tensor.reshape(tensor.size(0), -1, tensor.size(-1))

        def undo_seq_first(tensor, lead_dims):
            tensor.transpose_(0, -2)
            return tensor.reshape(*lead_dims, *tensor.shape[-2:])

        def to_TCN_input(tensor):
            # tensor: (seq, batch*building, s_dim)
            tensor = tensor.transpose(1, 0)  # (batch*building, seq, s_dim)
            old_shape = tensor.shape
            # NOTE(review): hard-codes 9 buildings -- confirm against dataset.
            return tensor.reshape((-1, 9, *old_shape[-2:]))

        src, tgt = self.target_fn(x)
        assert src.shape[:-2] == tgt.shape[:-2]
        h_s = self.auto_encoder(src.reshape(-1, src.size(-1))).reshape(*src.shape[:-1], -1)
        h_s, tgt = to_seq_first(h_s), to_seq_first(tgt)
        _, h_t = self.seq2seq(src=h_s, tgt=tgt)
        # h_cur = h_s[[-1]]
        # Concatenate history and forecast along the time axis.
        h = torch.cat((h_s, h_t), dim=0)  # (seq, batch*building, s_dim)
        # TCN input: (batch, building, seq, s_dim)
        out = self.TemporalModule((to_TCN_input(h))).unbind(-2)[-1]  # (batch, building, seq, 128)
        # out.transpose_(0, 1)  # -> (Building, Batch, State)
        # out = self.BuildingAttnModule(out)
        # out.transpose_(0, 1)  # -> (Batch, Building, State)
        # NOTE(review): unbind(-2)[-1] above already dropped the seq axis, so
        # this slices the feature axis at -(pred_len + 1) -- verify intent.
        return out[:, :, -(self.pred_len + 1)]


class Seq2SeqMixedTCNEncoder(BaseEncoder):
    """History is summarized by a TCN over the raw source states; the forecast
    branch reuses a pre-trained auto-encoder + seq2seq put in .eval() mode."""

    def __init__(self, source_size, target_size, hidden_size, target_fn,
                 auto_encoder_kwargs, unique_kwargs_history, unique_kwargs_forecast,
                 lstm_kwargs=None, **kwargs):
        super(Seq2SeqMixedTCNEncoder, self).__init__()
        if lstm_kwargs is None:
            lstm_kwargs = {}
        self.target_fn = target_fn
        # NOTE(review): input size hard-coded to 21 here (not source_size);
        # .eval() only toggles dropout/batch-norm behavior -- it does NOT
        # freeze the weights. Confirm that this matches the training setup.
        self.auto_encoder = AutoEncoder(21, hidden_size, **auto_encoder_kwargs).eval()
        self.seq2seq = Seq2SeqLSTM(hidden_size, target_size, hidden_size, **lstm_kwargs).eval()
        self.HistoryTemporalModule = TemporalConvNet(source_size, hidden_size, **unique_kwargs_history)
        self.ForecastTemporalModule = TemporalConvNet(hidden_size, hidden_size, **unique_kwargs_forecast)
        # self.BuildingAttnModule = TransformerEncoderLayer(hidden_size * 2, **attn_kwargs)

    def forward(self, x):
        """
        :param x: the state sequence
        :return: hidden state

        Shape:
            - x: :math:`(Batch, Building, Time, State)`.
            - return: :math:`(Batch, Building, Hidden_State*2)`.
        """
        def to_seq_first(tensor):
            tensor = tensor.unsqueeze(0).transpose(0, -2)
            return tensor.reshape(tensor.size(0), -1, tensor.size(-1))

        def undo_seq_first(tensor, lead_dims):
            tensor.transpose_(0, -2)
            return tensor.reshape(*lead_dims, *tensor.shape[-2:])

        def to_TCN_input(tensor):
            # tensor: (seq, batch*building, s_dim)
            tensor = tensor.transpose(1, 0)  # (batch*building, seq, s_dim)
            old_shape = tensor.shape
            return tensor.reshape((-1, 9, *old_shape[-2:]))

        def reverse_t_dim(tensor):
            # Flip the time axis (dim 2).
            inv_idx = torch.arange(tensor.size(2) - 1, -1, -1).long().to(tensor.device)
            # or equivalently torch.range(tensor.size(0)-1, 0, -1).long()
            inv_tensor = tensor.index_select(2, inv_idx)
            return inv_tensor

        # target_fn here yields THREE tensors: full source, reduced source, target.
        src_full, src, tgt = self.target_fn(x)
        assert src_full.shape[:-2] == src.shape[:-2] == tgt.shape[:-2]
        h_s = self.auto_encoder(src.reshape(-1, src.size(-1))).reshape(*src.shape[:-1], -1)
        h_s, tgt = to_seq_first(h_s), to_seq_first(tgt)
        _, h_t = self.seq2seq(src=h_s, tgt=tgt)
        # (seq, batch*building, s_dim)
        # TCN input: (batch, building, seq, s_dim)
        # reverse the forecast sequence on t-dim
        h_t = self.ForecastTemporalModule(reverse_t_dim(to_TCN_input(h_t))).unbind(-2)[-1]
        h_s = self.HistoryTemporalModule(src_full).unbind(-2)[-1]  # (batch, building, 128)
        out = torch.cat((h_s, h_t), dim=-1)  # (batch, building, 256)
        # out.transpose_(0, 1)  # -> (Building, Batch, State)
        # out = self.BuildingAttnModule(out)
        # out.transpose_(0, 1)  # -> (Batch, Building, State)
        return out


class Seq2SeqSymTCNEncoder(BaseEncoder):
    """Symmetric variant: separate auto-encoders for the history states (SOC
    columns included) and the forecast states (SOC removed), with one shared
    TCN over the concatenated hidden sequence."""

    def __init__(self, source_size, target_size, hidden_size, target_fn,
pred_len, auto_encoder_kwargs, tcn_kwargs, lstm_kwargs=None, **kwargs):
        super(Seq2SeqSymTCNEncoder, self).__init__()
        if lstm_kwargs is None:
            lstm_kwargs = {}
        self.pred_len = pred_len
        self.target_fn = target_fn
        self.history_AE = AutoEncoder(source_size, hidden_size, **auto_encoder_kwargs)  # src_size = 21
        self.auto_encoder = AutoEncoder(source_size - 2, hidden_size, **auto_encoder_kwargs)  # src_size = 19
        self.seq2seq = Seq2SeqLSTM(hidden_size, target_size, hidden_size, **lstm_kwargs)
        self.TemporalModule = TemporalConvNet(hidden_size, hidden_size, **tcn_kwargs)
        # self.BuildingAttnModule = TransformerEncoderLayer(hidden_size * 2, **attn_kwargs)

    def forward(self, x):
        """
        :param x: the state sequence
        :return: hidden state

        Shape:
            - x: :math:`(Batch, Building, Time, State)`.
            - return: :math:`(Batch, Building, Hidden_State*2)`.
        """
        def to_seq_first(tensor):
            # (Batch, Building, Time, F) -> (Time, Batch*Building, F)
            tensor = tensor.unsqueeze(0).transpose(0, -2)
            return tensor.reshape(tensor.size(0), -1, tensor.size(-1))

        def undo_seq_first(tensor, lead_dims):
            tensor.transpose_(0, -2)
            return tensor.reshape(*lead_dims, *tensor.shape[-2:])

        def to_TCN_input(tensor):
            # tensor: (seq, batch*building, s_dim)
            tensor = tensor.transpose(1, 0)  # (batch*building, seq, s_dim)
            old_shape = tensor.shape
            # NOTE(review): hard-codes 9 buildings -- confirm against dataset.
            return tensor.reshape((-1, 9, *old_shape[-2:]))

        src, tgt = self.target_fn(x)
        src_noSOC = src[:, :, :, :-2]  # discard soc states
        assert src.shape[:-2] == tgt.shape[:-2]
        # generate hidden states of history seq (SOC columns included)
        h_s_out = self.history_AE(src.reshape(-1, src.size(-1))).reshape(*src.shape[:-1], -1)
        h_s_out = to_seq_first(h_s_out)
        # generate hidden states of forecast seq (SOC columns removed)
        h_s = self.auto_encoder(src_noSOC.reshape(-1, src_noSOC.size(-1))).reshape(*src_noSOC.shape[:-1], -1)
        h_s, tgt = to_seq_first(h_s), to_seq_first(tgt)
        _, h_t_out = self.seq2seq(src=h_s, tgt=tgt)
        h = torch.cat((h_s_out, h_t_out), dim=0)  # (seq, batch*building, s_dim)
        # TCN input: (batch, building, seq, s_dim)
        out = self.TemporalModule((to_TCN_input(h))).unbind(-2)[-1]  # (batch, building, seq, 128)
        # out.transpose_(0, 1)  # -> (Building, Batch, State)
        # out = self.BuildingAttnModule(out)
        # out.transpose_(0, 1)  # -> (Batch, Building, State)
        # NOTE(review): unbind(-2)[-1] above already dropped the seq axis, so
        # this slices the feature axis at -(pred_len + 1) -- verify intent.
        return out[:, :, -(self.pred_len + 1)]


class Seq2SeqSymTCNEncoder_new(BaseEncoder):
    """Like Seq2SeqSymTCNEncoder, but the history features are selected by
    column index (see extract_history) and the seq2seq variant needs no
    target inputs."""

    def __init__(self, enc_src_size, history_src_size, hidden_size, pred_len,
                 auto_encoder_kwargs, tcn_kwargs, lstm_kwargs=None, **kwargs):
        super(Seq2SeqSymTCNEncoder_new, self).__init__()
        if lstm_kwargs is None:
            lstm_kwargs = {}
        self.pred_len = pred_len
        self.history_AE = AutoEncoder(history_src_size, hidden_size, **auto_encoder_kwargs)  # src_size = 21
        self.auto_encoder = AutoEncoder(enc_src_size, hidden_size, **auto_encoder_kwargs)  # src_size = 31
        self.seq2seq = Seq2SeqLSTM_new(source_size=hidden_size, target_size=hidden_size,
                                       pred_len=pred_len, hidden_size=hidden_size, **lstm_kwargs)
        self.TemporalModule = TemporalConvNet(hidden_size, hidden_size, **tcn_kwargs)
        # self.BuildingAttnModule = TransformerEncoderLayer(hidden_size * 2, **attn_kwargs)

    def forward(self, x):
        """
        :param x: the state sequence
        :return: hidden state

        Shape:
            - x: :math:`(Batch, Building, Time, State)`.
            - return: :math:`(Batch, Building, Hidden_State)`.
        """
        def extract_history(states):
            """Select the 21 history columns out of the 33 state columns
            (dim = 33 -> 21)."""
            result_list = [
                states[:, :, :, 0:10],
                states[:, :, :, 10:11],
                states[:, :, :, 14:15],
                states[:, :, :, 18:19],
                states[:, :, :, 22:23],
                states[:, :, :, 26:33],
            ]
            return torch.cat(result_list, -1)

        def to_seq_first(tensor):
            tensor = tensor.unsqueeze(0).transpose(0, -2)
            return tensor.reshape(tensor.size(0), -1, tensor.size(-1))

        def undo_seq_first(tensor,
<filename>grid_set.py # here is the class that holds all the data days/months # it has all the gridding scripts needed # it will save load all the data/days/months as needed # rewriting so all grids and in i-j coords import numpy as np import pandas as pd import datetime import copy from netCDF4 import Dataset from numba import jit from scipy import stats import data_year as dy from dateutil.relativedelta import relativedelta from mpl_toolkits.basemap import Basemap from scipy.interpolate import griddata from scipy import sparse class grid_set: # will make one of these at a time point (as a datetime) defined by timestart def __init__(self,mplot): self.mplot = mplot self.proj = True self.files = False self.saved = False self.grid = False self.gridinfo = False self.masked = False self.data = False # def import_regrid # # takes another data, on a seperate lon/lat, and regrids into a data/day/month # def import_regrid_nc # def import_regrid_vec # # takes vector input, a seperate lon/lat, and regrids into a data/day/month # # makes use of rotate/regrid/rotate methods to get it all pretty # def import_regrid_vec_nc def set_proj(self,mplot): # puts in a projection mplot too # # make make sure you keep hold of regridding projs self.mplot = mplot self.proj = True def reproject(self,mplot): self.xpts, self.ypts = mplot(self.lons,self.lats) self.mplot = mplot for a in dir(self): if a == 'xptp': self.get_ptp() break def set_grid_lon_lat(self,lons,lats,grid_list = False,fill_lonlat = False): # creates a grid depending on wanted resolution if fill_lonlat: lons,lats = np.meshgrid(lons,lats) if self.proj: xpts, ypts = self.mplot(lons,lats) self.lons = lons self.lats = lats self.xpts = xpts self.ypts = ypts if grid_list: print("Linear grid list. 
Following grid_set methods won't apply, though Gs2Gs regridding will") print("Zero values set for saving") self.dxRes = 1.0 self.dyRes = 1.0 self.m = 1 self.n = 1 self.ang_c = 1.0 self.ang_s = 1.0 self.xdist = 1.0 self.ydist = 1.0 self.gridinfo = True self.grid = True else: self.dxRes = np.mean(np.diff(xpts[0,:])) self.dyRes = np.mean(np.diff(ypts[:,0])) self.m,self.n = np.shape(lons) self.grid = True print("Got a grid res = ",self.m," x ",self.n) print("Note that all grid info is in nx x ny grids, whilst data is in nx x ny") else: print("Projection not defined yet, do that first") def get_ptp(self): """ Generates pts arrays for pcolor and pcolormesh - midpoitns for grid areas """ if self.grid: # extend longitude by 2 xpt_pad = np.pad(self.xpts, ((0,0),(1,0)), 'edge') ypt_pad = np.pad(self.ypts, ((1,0),(0,0)), 'edge') self.xptp = xpt_pad[:,:-1]+0.5*(np.diff(xpt_pad,axis=1)) self.yptp = ypt_pad[:-1,:]+0.5*(np.diff(ypt_pad,axis=0)) def set_grid_dxy(self,dxRes,dyRes): # creates a grid depending on wanted resolution if self.proj: nx = int((self.mplot.xmax-self.mplot.xmin)/dxRes)+1 ny = int((self.mplot.ymax-self.mplot.ymin)/dyRes)+1 lons, lats, xpts, ypts = self.mplot.makegrid(ny, nx, returnxy=True) self.lons = lons self.lats = lats self.xpts = xpts self.ypts = ypts self.dxRes = dxRes self.dyRes = dyRes self.grid = True self.m = nx self.n = ny print("Got a grid res = ",nx," x ",ny) print("Note that all grid info is in nx x ny grids, whilst data is in nx x ny") else: print("Projection not defined yet, do that first") def set_gate_grid(self,lonG,latG,npoints=100,aspect=100): # creates a grid depending on wanted resolution x,y = self.mplot(lonG,latG) aspect = 100 xpts = np.linspace(x[0],x[1],npoints) ypts = np.linspace(y[0],y[1],npoints) ystep = (xpts[-1]- xpts[0])/aspect xstep = (ypts[-1]- ypts[0])/aspect xpts = np.vstack([xpts,xpts - xstep]).T ypts = np.vstack([ypts,ypts + ystep]).T lons,lats = self.mplot(xpts,ypts,inverse=True) self.lons = lons self.lats = lats 
self.xpts = xpts self.ypts = ypts self.dxRes = np.abs(ystep) self.dyRes = np.abs(xstep) self.grid = True self.m = npoints self.n = 2 print("Got a gate res = ",self.m," ({:g} m)".format(self.dxRes), " x ",self.n) def set_grid_mn(self,nx,ny): # creates a grid depending on wanted no. of points if self.proj: lons, lats, xpts, ypts = self.mplot.makegrid(ny, nx, returnxy=True) self.lons = lons self.lats = lats self.xpts = xpts self.ypts = ypts self.grid = True self.dxRes = (self.mplot.xmax-self.mplot.xmin)/(nx - 1) self.dyRes = (self.mplot.ymax-self.mplot.ymin)/(ny - 1) self.m = nx self.n = ny print("Got a grid res = ",nx," x ",ny) else: print("Projection not defined yet, do that first") def get_grid_info(self,av_dist = True, av_ang = True): # creates a grid depending on wanted no. of points # print( self.grid and (not self.gridinfo)) if self.grid and (not self.gridinfo): #iterate over the grid to get dimensions and angles # first iterate all x dimensions - m-1/n array # then iterate all y dimensions - m/n-1 array xdims = np.empty([self.m-1,self.n]) ydims = np.empty([self.m,self.n-1]) self.xdist = np.empty([self.m,self.n]) self.ydist = np.empty([self.m,self.n]) self.ang_c = np.empty([self.m,self.n]) self.ang_s = np.empty([self.m,self.n]) for i in range(self.m): for j in range(self.n-1): ydims[i,j] = ellipsoidal_distance( self.lons[i,j ],self.lats[i,j ], self.lons[i,j+1],self.lats[i,j+1],deg=True) for i in range(self.m-1): for j in range(self.n): xdims[i,j] = ellipsoidal_distance( self.lons[i ,j],self.lats[i ,j], self.lons[i+1,j],self.lats[i+1,j],deg=True) # then average the available distances i-1,i j-1,j if av_dist: for i in range(self.m): for j in range(self.n): self.xdist[i,j] = np.nanmean(xdims[:i+1,j][-2:]) self.ydist[i,j] = np.nanmean(ydims[i,:j+1][-2:]) else: self.xdist[:-1,:] = xdims self.xdist[-1,:] = xdims[-1,:] self.ydist[:,:-1] = ydims self.ydist[:,-1] = ydims[:,-1] print("Grid distances calculated: ",np.nanmean(self.xdist)," x ",np.nanmean(self.ydist)) # 
then iterate all angles - this is all points plus the extra possible angles # pad the lon lat arrays for iteration lon_pad = np.pad(self.lons, (1,1), 'linear_ramp', end_values=(np.nan)) lat_pad = np.pad(self.lats, (1,1), 'linear_ramp', end_values=(np.nan)) for j in range(self.m): for i in range(self.n): # i + angle yPlus_c,yPlus_s = lon_lat_angle(lon_pad[j+1,i+1],lat_pad[j+1,i+1], lon_pad[j+2,i+1],lat_pad[j+2,i+1], return_trig = True,deg=True) if av_ang: xPlus_c,xPlus_s = lon_lat_angle(lon_pad[j+1,i+1],lat_pad[j+1,i+1], lon_pad[j+1,i+2],lat_pad[j+1,i+2], return_trig = True,deg=True) xMins_c,xMins_s = lon_lat_angle(lon_pad[j+1,i+1],lat_pad[j+1,i+1], lon_pad[j+1,i ],lat_pad[j+1,i ], return_trig = True,deg=True) yMins_c,yMins_s = lon_lat_angle(lon_pad[j+1,i+1],lat_pad[j+1,i+1], lon_pad[j ,i+1],lat_pad[j ,i+1], return_trig = True,deg=True) # average all the components first checking the orientation # if j == 20 and i ==12: # print([xPlus_c,xMins_c,yPlus_c,yMins_c]) # print([xPlus_s,xMins_s,yPlus_s,yMins_s]) if av_ang: self.ang_c[j,i] = np.nanmean([-xPlus_s, xMins_s, yPlus_c,-yMins_c]) self.ang_s[j,i] = np.nanmean([ xPlus_c,-xMins_c, yPlus_s,-yMins_s]) mag = np.hypot(self.ang_c[j,i],self.ang_s[j,i]) self.ang_c[j,i] /= mag self.ang_s[j,i] /= mag else: self.ang_c[j,i] = yPlus_c self.ang_s[j,i] = yPlus_s print('Angles calculated') self.gridinfo = True else: print("Grid not defined yet, do that first") def get_square_points(self): """ makes the xsq,ysq fields that will let you plot on a square grid uses np.meshgrid to make location arrasy statring lower left at (0,0) """ self.xsq,self.ysq = np.meshgrid(np.linspace(0,1,self.m),np.linspace(0,1,self.n),indexing = 'ij') def check_angles(self,point=False,scale=1.0,project = False): # return np.hypot of con/sin, min/max and mean check_ang = np.hypot(self.ang_c,self.ang_s)**2 print('mean ='+str(np.nanmean(check_ang))) print('max ='+str(np.nanmax(check_ang))) print('min ='+str(np.nanmin(check_ang))) # if a point is given return a 
vector to north and x positive # so it can be plotted on projection if (type(point) == list and project): # do it using the projection i = point[0] j = point[1] # vector is due up (0,1) Out1 = (self.xpts[i,j],self.ypts[i,j]) # due north (easy) xrot = np.array(0.0) #-self.ang_s[i,j] yrot = np.array(1.0) # self.ang_c[i,j] u,v = self.mplot.rotate_vector(xrot,yrot,self.lons[i,j],self.lats[i,j]) # vertical on grid (0,1) xrot = -self.ang_c[i,j] yrot = -self.ang_s[i,j] # # horizontal on grid (1,0) # xrot = -self.ang_s[i,j] # yrot = self.ang_c[i,j] u1,v1 = self.mplot.rotate_vector(xrot,yrot,self.lons[i,j],self.lats[i,j]) return u,v,u1,v1,Out1[0],Out1[1] elif type(point) == list: # returns two normalised vectors i = point[0] j = point[1] # line1 starts at point # goes in direction to j+1 (+ve x) xvec = self.xpts[i,j+1] - self.xpts[i,j] yvec = self.ypts[i,j+1] - self.ypts[i,j] # print(xvec,yvec) # angles are between positive x and due north clockwise # xrot = self.ang_c[i,j]*xvec + self.ang_s[i,j]*yvec # yrot = self.ang_c[i,j]*yvec - self.ang_s[i,j]*xvec # rotation is -pi/4 + rotation xrot = xyvec, yrot = -xvec xrot = self.ang_c[i,j]*yvec - self.ang_s[i,j]*xvec yrot = -self.ang_c[i,j]*xvec - self.ang_s[i,j]*yvec # print(xrot,yrot) print(np.rad2deg(np.arctan2(self.ang_s[i,j],self.ang_c[i,j]))) Out1 = (self.xpts[i,j],self.ypts[i,j]) Out2 = (Out1[0] + xvec*scale,Out1[1] + yvec*scale) Out3 = (Out1[0] + xrot*scale,Out1[1] + yrot*scale) # return the list of x,y's needed for plot return ([Out1[0],Out2[0]], [Out1[1],Out2[1]]),([Out1[0],Out3[0]], [Out1[1],Out3[1]]) # line2 starts at point # goes in direction - j+1 plus rotation def rotate_vectors_to_plot(self,xvec,yvec): """ utilises the ang_c and ang_s arrays along with the associated projection """ # ur,vr will be in lon/lat # ur = xvec*self.ang_c + yvec*self.ang_s # vr = yvec*self.ang_c - xvec*self.ang_s # test ur = -yvec*self.ang_c - xvec*self.ang_s vr = xvec*self.ang_c - yvec*self.ang_s urr,vrr = 
self.mplot.rotate_vector(ur,vr,self.lons,self.lats) return urr,vrr def save_grid(self,file): if self.grid and self.gridinfo: # save lat/lon pts np.savez(file, lats = self.lats, lons = self.lons, xpts = self.xpts, ypts = self.ypts, dxRes = self.dxRes, dyRes = self.dyRes, m = self.m, n = self.n, ang_c = self.ang_c, ang_s = self.ang_s,
":scale"), (store_mul, ":green", 3 * 145, ":scale"), (store_mul, ":blue", 3 * 45, ":scale"), (val_div, ":red", 100), (val_div, ":green", 100), (val_div, ":blue", 100), (set_current_color,":red", ":green", ":blue"), (set_position_delta,0,0,0), (add_point_light_to_entity, 10, 30), ]), ]), ("light_red",sokf_invisible,"light_sphere","0", [(ti_on_scene_prop_init, [(store_trigger_param_1, ":prop_instance_no"), (set_fixed_point_multiplier, 100), (prop_instance_get_scale, pos5, ":prop_instance_no"), (position_get_scale_x, ":scale", pos5), (store_mul, ":red", 2 * 170, ":scale"), (store_mul, ":green", 2 * 100, ":scale"), (store_mul, ":blue", 2 * 30, ":scale"), (val_div, ":red", 100), (val_div, ":green", 100), (val_div, ":blue", 100), (set_current_color,":red", ":green", ":blue"), (set_position_delta,0,0,0), (add_point_light_to_entity, 20, 30), ]), ]), ("light_night",sokf_invisible,"light_sphere","0", [(ti_on_scene_prop_init, [(is_currently_night, 0), (store_trigger_param_1, ":prop_instance_no"), (set_fixed_point_multiplier, 100), (prop_instance_get_scale, pos5, ":prop_instance_no"), (position_get_scale_x, ":scale", pos5), (store_mul, ":red", 3 * 160, ":scale"), (store_mul, ":green", 3 * 145, ":scale"), (store_mul, ":blue", 3 * 100, ":scale"), (val_div, ":red", 100), (val_div, ":green", 100), (val_div, ":blue", 100), (set_current_color,":red", ":green", ":blue"), (set_position_delta,0,0,0), (add_point_light_to_entity, 10, 30), ]), ]), ("torch",0,"torch_a","0", [(ti_on_scene_prop_init, [(store_trigger_param_1, ":instance_id"), (prop_instance_play_sound, ":instance_id", "snd_fire_loop"), (set_position_delta,0,-35,48), (particle_system_add_new, "psys_torch_fire"), (particle_system_add_new, "psys_torch_smoke"), (particle_system_add_new, "psys_torch_fire_sparks"), (set_position_delta,0,-35,56), (particle_system_add_new, "psys_fire_glow_1"), (get_trigger_object_position, pos2), (set_position_delta,0,0,0), (position_move_y, pos2, -35), (position_move_z, pos2, 55), 
(particle_system_burst, "psys_fire_glow_fixed", pos2, 1), ]), ]), ("torch_night",0,"torch_a","0", [(ti_on_scene_prop_init, [(is_currently_night, 0), (store_trigger_param_1, ":instance_id"), (prop_instance_play_sound, ":instance_id", "snd_fire_loop"), (set_position_delta,0,-35,48), (particle_system_add_new, "psys_torch_fire"), (particle_system_add_new, "psys_torch_smoke"), (particle_system_add_new, "psys_torch_fire_sparks"), (set_position_delta,0,-35,56), (particle_system_add_new, "psys_fire_glow_1"), (particle_system_emit, "psys_fire_glow_1",9000000), ]), ]), ("barrier_20m",sokf_invisible|sokf_type_barrier,"barrier_20m","bo_barrier_20m", []), ("barrier_16m",sokf_invisible|sokf_type_barrier,"barrier_16m","bo_barrier_16m", []), ("barrier_8m" ,sokf_invisible|sokf_type_barrier,"barrier_8m" ,"bo_barrier_8m" , []), ("barrier_4m" ,sokf_invisible|sokf_type_barrier,"barrier_4m" ,"bo_barrier_4m" , []), ("barrier_2m" ,sokf_invisible|sokf_type_barrier,"barrier_2m" ,"bo_barrier_2m" , []), ("exit_4m" ,sokf_invisible|sokf_type_barrier_leave,"barrier_4m" ,"bo_barrier_4m" , []), ("exit_8m" ,sokf_invisible|sokf_type_barrier_leave,"barrier_8m" ,"bo_barrier_8m" , []), ("exit_16m" ,sokf_invisible|sokf_type_barrier_leave,"barrier_16m" ,"bo_barrier_16m" , []), ("ai_limiter_2m" ,sokf_invisible|sokf_type_ai_limiter,"barrier_2m" ,"bo_barrier_2m" , []), ("ai_limiter_4m" ,sokf_invisible|sokf_type_ai_limiter,"barrier_4m" ,"bo_barrier_4m" , []), ("ai_limiter_8m" ,sokf_invisible|sokf_type_ai_limiter,"barrier_8m" ,"bo_barrier_8m" , []), ("ai_limiter_16m",sokf_invisible|sokf_type_ai_limiter,"barrier_16m","bo_barrier_16m", []), ("shield",sokf_dynamic,"0","boshield", []), ("shelves",0,"shelves","boshelves", []), ("table_tavern",0,"table_tavern","botable_tavern", []), ("table_castle_a",0,"table_castle_a","bo_table_castle_a", []), ("chair_castle_a",0,"chair_castle_a","bo_chair_castle_a", []), ("pillow_a",0,"pillow_a","bo_pillow", []), ("pillow_b",0,"pillow_b","bo_pillow", []), 
("pillow_c",0,"pillow_c","0", []), ("interior_castle_g_square_keep_b",0,"interior_castle_g_square_keep_b","bo_interior_castle_g_square_keep_b", []), ("carpet_with_pillows_a",0,"carpet_with_pillows_a","bo_carpet_with_pillows", []), ("carpet_with_pillows_b",0,"carpet_with_pillows_b","bo_carpet_with_pillows", []), ("table_round_a",0,"table_round_a","bo_table_round_a", []), ("table_round_b",0,"table_round_b","bo_table_round_b", []), ("fireplace_b",0,"fireplace_b","bo_fireplace_b", []), ("fireplace_c",0,"fireplace_c","bo_fireplace_c", []), ("sofa_a",0,"sofa_a","bo_sofa", []), ("sofa_b",0,"sofa_b","bo_sofa", []), ("ewer_a",0,"ewer_a","bo_ewer_a", []), ("end_table_a",0,"end_table_a","bo_end_table_a", []), ("fake_houses_steppe_a",0,"fake_houses_steppe_a","0", []), ("fake_houses_steppe_b",0,"fake_houses_steppe_b","0", []), ("fake_houses_steppe_c",0,"fake_houses_steppe_c","0", []), ("boat_destroy",0,"boat_destroy","bo_boat_destroy", []), ("destroy_house_a",0,"destroy_house_a","bo_destroy_house_a", []), ("destroy_house_b",0,"destroy_house_b","bo_destroy_house_b", []), ("destroy_house_c",0,"destroy_house_c","bo_destroy_house_c", []), ("destroy_heap",0,"destroy_heap","bo_destroy_heap", []), ("destroy_castle_a",0,"destroy_castle_a","bo_destroy_castle_a", []), ("destroy_castle_b",0,"destroy_castle_b","bo_destroy_castle_b", []), ("destroy_castle_c",0,"destroy_castle_c","bo_destroy_castle_c", []), ("destroy_castle_d",0,"destroy_castle_d","bo_destroy_castle_d", []), ("destroy_windmill",0,"destroy_windmill","bo_destroy_windmill", []), ("destroy_tree_a",0,"destroy_tree_a","bo_destroy_tree_a", []), ("destroy_tree_b",0,"destroy_tree_b","bo_destroy_tree_b", []), ("destroy_bridge_a",0,"destroy_bridge_a","bo_destroy_bridge_a", []), ("destroy_bridge_b",0,"destroy_bridge_b","bo_destroy_bridge_b", []), ("catapult",0,"Catapult","bo_Catapult", []), ("catapult_destructible",sokf_moveable|sokf_destructible,"Catapult","bo_Catapult", []), ("broom",0,"broom","0", []), ("garlic",0,"garlic","0", []), 
("garlic_b",0,"garlic_b","0", []), ("destroy_a",0,"destroy_a","0", []), ("destroy_b",0,"destroy_b","0", []), ("bridge_wooden",0,"bridge_wooden","bo_bridge_wooden", []), ("bridge_wooden_snowy",0,"bridge_wooden_snowy","bo_bridge_wooden", []), ("grave_a",0,"grave_a","bo_grave_a", []), ("village_house_e",0,"village_house_e","bo_village_house_e", []), ("village_house_f",0,"village_house_f","bo_village_house_f", []), ("village_house_g",0,"village_house_g","bo_village_house_g", []), ("village_house_h",0,"village_house_h","bo_village_house_h", []), ("village_house_i",0,"village_house_i","bo_village_house_i", []), ("village_house_j",0,"village_house_j","bo_village_house_j", []), ("village_wall_a",0,"village_wall_a","bo_village_wall_a", []), ("village_wall_b",0,"village_wall_b","bo_village_wall_b", []), ("village_snowy_house_a",0,"village_snowy_house_a","bo_village_snowy_house_a", []), ("village_snowy_house_b",0,"village_snowy_house_b","bo_village_snowy_house_b", []), ("village_snowy_house_c",0,"village_snowy_house_c","bo_village_snowy_house_c", []), ("village_snowy_house_d",0,"village_snowy_house_d","bo_village_snowy_house_d", []), ("village_snowy_house_e",0,"village_snowy_house_e","bo_village_snowy_house_e", []), ("village_snowy_house_f",0,"village_snowy_house_f","bo_village_snowy_house_f", []), ("town_house_steppe_a",0,"town_house_steppe_a","bo_town_house_steppe_a", []), ("town_house_steppe_b",0,"town_house_steppe_b","bo_town_house_steppe_b", []), ("town_house_steppe_c",0,"town_house_steppe_c","bo_town_house_steppe_c", []), ("town_house_steppe_d",0,"town_house_steppe_d","bo_town_house_steppe_d", []), ("town_house_steppe_e",0,"town_house_steppe_e","bo_town_house_steppe_e", []), ("town_house_steppe_f",0,"town_house_steppe_f","bo_town_house_steppe_f", []), ("town_house_steppe_g",0,"town_house_steppe_g","bo_town_house_steppe_g", []), ("town_house_steppe_h",0,"town_house_steppe_h","bo_town_house_steppe_h", []), 
("town_house_steppe_i",0,"town_house_steppe_i","bo_town_house_steppe_i", []), ("carpet_a",0,"carpet_a","0", []), ("carpet_b",0,"carpet_b","0", []), ("carpet_c",0,"carpet_c","0", []), ("carpet_d",0,"carpet_d","0", []), ("carpet_e",0,"carpet_e","0", []), ("carpet_f",0,"carpet_f","0", []), ("awning_a",0,"awning_a","bo_awning", []), ("awning_b",0,"awning_b","bo_awning", []), ("awning_c",0,"awning_c","bo_awning", []), ("awning_long",0,"awning_long","bo_awning_long", []), ("awning_long_b",0,"awning_long_b","bo_awning_long", []), ("awning_d",0,"awning_d","bo_awning_d", []), ("ship",0,"ship","bo_ship", []), ("ship_b",0,"ship_b","bo_ship_b", []), ("ship_c",0,"ship_c","bo_ship_c", []), ("ship_d",0,"ship_d","bo_ship_d", []), ("snowy_barrel_a",0,"snowy_barrel_a","bo_snowy_barrel_a", []), ("snowy_fence",0,"snowy_fence","bo_snowy_fence", []), ("snowy_wood_heap",0,"snowy_wood_heap","bo_snowy_wood_heap", []), ("village_snowy_stable_a",0,"village_snowy_stable_a","bo_village_snowy_stable_a", []), ("village_straw_house_a",0,"village_straw_house_a","bo_village_straw_house_a", []), ("village_stable_a",0,"village_stable_a","bo_village_stable_a", []), ("village_shed_a",0,"village_shed_a","bo_village_shed_a", []), ("village_shed_b",0,"village_shed_b","bo_village_shed_b", []), ("dungeon_door_cell_a",0,"dungeon_door_cell_a","bo_dungeon_door_cell_a", []), ("dungeon_door_cell_b",0,"dungeon_door_cell_b","bo_dungeon_door_cell_b", []), ("dungeon_door_entry_a",0,"dungeon_door_entry_a","bo_dungeon_door_entry_a", []), ("dungeon_door_entry_b",0,"dungeon_door_entry_b","bo_dungeon_door_entry_a", []), ("dungeon_door_entry_c",0,"dungeon_door_entry_c","bo_dungeon_door_entry_a", []), ("dungeon_door_direction_a",0,"dungeon_door_direction_a","bo_dungeon_door_direction_a", []), ("dungeon_door_direction_b",0,"dungeon_door_direction_b","bo_dungeon_door_direction_a", []), ("dungeon_door_stairs_a",0,"dungeon_door_stairs_a","bo_dungeon_door_stairs_a", []), 
("dungeon_door_stairs_b",0,"dungeon_door_stairs_b","bo_dungeon_door_stairs_a", []), ("dungeon_bed_a",0,"dungeon_bed_a","0", []), ("dungeon_bed_b",0,"dungeon_bed_b","bo_dungeon_bed_b", []), ("torture_tool_a",0,"torture_tool_a","bo_torture_tool_a", []), ("torture_tool_b",0,"torture_tool_b","0", []), ("torture_tool_c",0,"torture_tool_c","bo_torture_tool_c", []), ("skeleton_head",0,"skeleton_head","0", []), ("skeleton_bone",0,"skeleton_bone","0", []), ("skeleton_a",0,"skeleton_a","bo_skeleton_a", []), ("dungeon_stairs_a",0,"dungeon_stairs_a","bo_dungeon_stairs_a", []), ("dungeon_stairs_b",0,"dungeon_stairs_b","bo_dungeon_stairs_a", []), ("dungeon_torture_room_a",0,"dungeon_torture_room_a","bo_dungeon_torture_room_a", []), ("dungeon_entry_a",0,"dungeon_entry_a","bo_dungeon_entry_a", []), ("dungeon_entry_b",0,"dungeon_entry_b","bo_dungeon_entry_b", []), ("dungeon_entry_c",0,"dungeon_entry_c","bo_dungeon_entry_c", []), ("dungeon_cell_a",0,"dungeon_cell_a","bo_dungeon_cell_a", []), ("dungeon_cell_b",0,"dungeon_cell_b","bo_dungeon_cell_b", []), ("dungeon_cell_c",0,"dungeon_cell_c_open","bo_dungeon_cell_c_open", []), ("dungeon_corridor_a",0,"dungeon_corridor_a","bo_dungeon_corridor_a", []), ("dungeon_corridor_b",0,"dungeon_corridor_b","bo_dungeon_corridor_b", []), ("dungeon_corridor_c",0,"dungeon_corridor_c","bo_dungeon_corridor_a", []), ("dungeon_corridor_d",0,"dungeon_corridor_d","bo_dungeon_corridor_b", []), ("dungeon_direction_a",0,"dungeon_direction_a","bo_dungeon_direction_a", []), ("dungeon_direction_b",0,"dungeon_direction_b","bo_dungeon_direction_a", []), ("dungeon_room_a",0,"dungeon_room_a","bo_dungeon_room_a", []), ("dungeon_tower_stairs_a",sokf_type_ladder,"dungeon_tower_stairs_a","bo_dungeon_tower_stairs_a", []), ("dungeon_tower_cell_a",0,"dungeon_tower_cell_a","bo_dungeon_tower_cell_a", []), ("tunnel_a",0,"tunnel_a","bo_tunnel_a", []), ("tunnel_salt",0,"tunnel_salt","bo_tunnel_salt", []), ("salt_a",0,"salt_a","bo_salt_a", []), 
("door_destructible",sokf_moveable|sokf_destructible|spr_use_time(2),"tutorial_door_a","bo_tutorial_door_a", []), ("tutorial_door_a",sokf_moveable,"tutorial_door_a","bo_tutorial_door_a", []), ("tutorial_door_b",sokf_moveable,"tutorial_door_b","bo_tutorial_door_b", []), ("tutorial_flag_yellow",sokf_moveable|sokf_face_player,"tutorial_flag_yellow","0", []), ("tutorial_flag_red",sokf_moveable|sokf_face_player,"tutorial_flag_red","0", []), ("tutorial_flag_blue",sokf_moveable|sokf_face_player,"tutorial_flag_blue","0", []), ("interior_prison_a",0,"interior_prison_a","bo_interior_prison_a", []), ("interior_prison_b",0,"interior_prison_b","bo_interior_prison_b", []), ("interior_prison_cell_a",0,"interior_prison_cell_a","bo_interior_prison_cell_a", []), ("interior_prison_d",0,"interior_prison_d","bo_interior_prison_d", []), ("arena_archery_target_a",0,"arena_archery_target_a","bo_arena_archery_target_a", []), ("archery_butt_a",0,"archery_butt","bo_archery_butt", []), ("archery_target_with_hit_a",0,"arena_archery_target_a","bo_arena_archery_target_a", []), ("dummy_a",sokf_destructible|sokf_moveable,"arena_archery_target_b","bo_arena_archery_target_b", []), ("band_a",0,"band_a","0", []), ("arena_sign",0,"arena_arms","0", []), ("castle_h_battlement_a",0,"castle_h_battlement_a","bo_castle_h_battlement_a", []), ("castle_h_battlement_b",0,"castle_h_battlement_b","bo_castle_h_battlement_b", []), ("castle_h_battlement_c",0,"castle_h_battlement_c","bo_castle_h_battlement_c", []), ("castle_h_battlement_a2",0,"castle_h_battlement_a2","bo_castle_h_battlement_a2", []), ("castle_h_battlement_b2",0,"castle_h_battlement_b2","bo_castle_h_battlement_b2", []), ("castle_h_corner_a",0,"castle_h_corner_a","bo_castle_h_corner_a", []), ("castle_h_corner_c",0,"castle_h_corner_c","bo_castle_h_corner_c", []), ("castle_h_stairs_a",0,"castle_h_stairs_a","bo_castle_h_stairs_a", []), ("castle_h_stairs_b",0,"castle_h_stairs_b","bo_castle_h_stairs_b", []), 
("castle_h_gatehouse_a",0,"castle_h_gatehouse_a","bo_castle_h_gatehouse_a", []), ("castle_h_keep_a",0,"castle_h_keep_a","bo_castle_h_keep_a", []), ("castle_h_keep_b",0,"castle_h_keep_b","bo_castle_h_keep_b", []), ("castle_h_house_a",0,"castle_h_house_a","bo_castle_h_house_a", []), ("castle_h_house_b",0,"castle_h_house_b","bo_castle_h_house_b", []), ("castle_h_house_c",0,"castle_h_house_c","bo_castle_h_house_b", []), ("castle_h_battlement_barrier",0,"castle_h_battlement_barrier","bo_castle_h_battlement_barrier", []), ("full_keep_b",0,"full_keep_b","bo_full_keep_b", []), ("castle_f_keep_a",0,"castle_f_keep_a","bo_castle_f_keep_a", []), ("castle_f_battlement_a",0,"castle_f_battlement_a","bo_castle_f_battlement_a", []), ("castle_f_battlement_a_destroyed",0,"castle_f_battlement_a_destroyed","bo_castle_f_battlement_a_destroyed", []), ("castle_f_battlement_b",0,"castle_f_battlement_b","bo_castle_f_battlement_b", []), ("castle_f_battlement_c",0,"castle_f_battlement_c","bo_castle_f_battlement_c", []), ("castle_f_battlement_d",0,"castle_f_battlement_d","bo_castle_f_battlement_d", []), ("castle_f_battlement_e",0,"castle_f_battlement_e","bo_castle_f_battlement_e", []), ("castle_f_sally_port_elevation",0,"castle_f_sally_port_elevation","bo_castle_f_sally_port_elevation", []), ("castle_f_battlement_corner_a",0,"castle_f_battlement_corner_a","bo_castle_f_battlement_corner_a_fixed", []), ("castle_f_battlement_corner_b",0,"castle_f_battlement_corner_b","bo_castle_f_battlement_corner_b", []), ("castle_f_battlement_corner_c",0,"castle_f_battlement_corner_c","bo_castle_f_battlement_corner_c", []), ("castle_f_door_a",sokf_moveable|sokf_destructible|spr_use_time(0),"castle_f_door_a","bo_castle_f_door_a", []), ("castle_f_doors_top_a",0,"castle_f_doors_top_a","bo_castle_f_doors_top_a", []), ("castle_f_sally_door_a",sokf_moveable|sokf_destructible|spr_use_time(0),"castle_f_sally_door_a","bo_castle_f_sally_door_a", []), 
("castle_f_stairs_a",sokf_type_ladder,"castle_f_stairs_a","bo_castle_f_stairs_a", []), ("castle_f_tower_a",0,"castle_f_tower_a","bo_castle_f_tower_a", []), ("castle_f_wall_stairs_a",sokf_type_ladder,"castle_f_wall_stairs_a","bo_castle_f_wall_stairs_a", []), ("castle_f_wall_stairs_b",sokf_type_ladder,"castle_f_wall_stairs_b","bo_castle_f_wall_stairs_b", []), ("castle_f_wall_way_a",0,"castle_f_wall_way_a","bo_castle_f_wall_way_a", []), ("castle_f_wall_way_b",0,"castle_f_wall_way_b","bo_castle_f_wall_way_b", []), ("castle_f_gatehouse_a",0,"castle_f_gatehouse_a","bo_castle_f_gatehouse_a", []), ("castle_g_battlement_a",0,"castle_g_battlement_a","bo_castle_g_battlement_a", []), ("castle_g_battlement_a1",0,"castle_g_battlement_a1","bo_castle_g_battlement_a1", []), ("castle_g_battlement_c",0,"castle_g_battlement_c","bo_castle_g_battlement_c", []), ("castle_g_corner_a",0,"castle_g_corner_a","bo_castle_g_corner_a", []), ("castle_g_corner_c",0,"castle_g_corner_c","bo_castle_g_corner_c", []), ("castle_g_tower_a",0,"castle_g_tower_a","bo_castle_g_tower_a", []), ("castle_g_gate_house",0,"castle_g_gate_house","bo_castle_g_gate_house", []), ("castle_g_gate_house_door_a",0,"castle_g_gate_house_door_a","bo_castle_g_gate_house_door_a", []), ("castle_g_gate_house_door_b",0,"castle_g_gate_house_door_b","bo_castle_g_gate_house_door_b", []), ("castle_g_square_keep_a",0,"castle_g_square_keep_a","bo_castle_g_square_keep_a", []), ("castle_i_battlement_a",0,"castle_i_battlement_a","bo_castle_i_battlement_a", []), ("castle_i_battlement_a1",0,"castle_i_battlement_a1","bo_castle_i_battlement_a1", []), ("castle_i_battlement_c",0,"castle_i_battlement_c","bo_castle_i_battlement_c", []), ("castle_i_corner_a",0,"castle_i_corner_a","bo_castle_i_corner_a", []), ("castle_i_corner_c",0,"castle_i_corner_c","bo_castle_i_corner_c", []), ("castle_i_tower_a",0,"castle_i_tower_a","bo_castle_i_tower_a", []), ("castle_i_gate_house",0,"castle_i_gate_house","bo_castle_i_gate_house", []), 
("castle_i_gate_house_door_a",0,"castle_i_gate_house_door_a","bo_castle_i_gate_house_door_a", []), ("castle_i_gate_house_door_b",0,"castle_i_gate_house_door_b","bo_castle_i_gate_house_door_b", []), ("castle_i_square_keep_a",0,"castle_i_square_keep_a","bo_castle_i_square_keep_a", []), ("mosque_a",0,"mosque_a","bo_mosque_a", []), ("stone_minaret_a",0,"stone_minaret_a","bo_stone_minaret_a", []), ("stone_house_a",0,"stone_house_a","bo_stone_house_a", []), ("stone_house_b",0,"stone_house_b","bo_stone_house_b", []), ("stone_house_c",0,"stone_house_c","bo_stone_house_c", []), ("stone_house_d",0,"stone_house_d","bo_stone_house_d", []), ("stone_house_e",0,"stone_house_e","bo_stone_house_e", []), ("stone_house_f",0,"stone_house_f","bo_stone_house_f", []), ("banner_pole", sokf_moveable, "banner_pole", "bo_banner_pole", []), ("custom_banner_01",0,"pw_banner_castle","0", []), ("custom_banner_02",0,"pw_banner_castle","0", []), ("banner_a01",0,"pw_banner_a01","0", []), ("banner_a02",0,"pw_banner_a02","0", []), ("banner_a03",0,"pw_banner_a03","0", []), ("banner_a04",0,"pw_banner_a04","0", []), ("banner_a05",0,"pw_banner_a05","0", []), ("banner_a06",0,"pw_banner_a06","0", []), ("banner_a07",0,"pw_banner_a07","0", []), ("banner_a08",0,"pw_banner_a08","0", []), ("banner_a09",0,"pw_banner_a09","0", []), ("banner_a10",0,"pw_banner_a10","0", []), ("banner_a11",0,"pw_banner_a11","0", []), ("banner_a12",0,"pw_banner_a12","0", []), ("banner_a13",0,"pw_banner_a13","0", []), ("banner_a14",0,"pw_banner_a14","0", []), ("banner_a15",0,"pw_banner_a15","0", []), ("banner_a16",0,"pw_banner_a16","0", []), ("banner_a17",0,"pw_banner_a17","0", []), ("banner_a18",0,"pw_banner_a18","0", []), ("banner_a19",0,"pw_banner_a19","0", []), ("banner_a20",0,"pw_banner_a20","0", []), ("banner_a21",0,"pw_banner_a21","0", []), ("banner_b01",0,"pw_banner_b01","0", []), ("banner_b02",0,"pw_banner_b02","0", []), ("banner_b03",0,"pw_banner_b03","0", []), ("banner_b04",0,"pw_banner_b04","0", []), 
("banner_b05",0,"pw_banner_b05","0", []), ("banner_b06",0,"pw_banner_b06","0", []), ("banner_b07",0,"pw_banner_b07","0", []), ("banner_b08",0,"pw_banner_b08","0", []), ("banner_b09",0,"pw_banner_b09","0", []), ("banner_b10",0,"pw_banner_b10","0", []), ("banner_b11",0,"pw_banner_b11","0", []), ("banner_b12",0,"pw_banner_b12","0", []), ("banner_b13",0,"pw_banner_b13","0", []), ("banner_b14",0,"pw_banner_b14","0", []), ("banner_b15",0,"pw_banner_b15","0", []), ("banner_b16",0,"pw_banner_b16","0", []), ("banner_b17",0,"pw_banner_b17","0", []), ("banner_b18",0,"pw_banner_b18","0", []), ("banner_b19",0,"pw_banner_b19","0", []), ("banner_b20",0,"pw_banner_b20","0", []), ("banner_b21",0,"pw_banner_b21","0", []), ("banner_c01",0,"pw_banner_c01","0", []), ("banner_c02",0,"pw_banner_c02","0", []), ("banner_c03",0,"pw_banner_c03","0", []), ("banner_c04",0,"pw_banner_c04","0", []), ("banner_c05",0,"pw_banner_c05","0", []), ("banner_c06",0,"pw_banner_c06","0", []), ("banner_c07",0,"pw_banner_c07","0", []), ("banner_c08",0,"pw_banner_c08","0", []), ("banner_c09",0,"pw_banner_c09","0", []), ("banner_c10",0,"pw_banner_c10","0", []), ("banner_c11",0,"pw_banner_c11","0", []), ("banner_c12",0,"pw_banner_c12","0", []), ("banner_c13",0,"pw_banner_c13","0", []), ("banner_c14",0,"pw_banner_c14","0", []), ("banner_c15",0,"pw_banner_c15","0", []), ("banner_c16",0,"pw_banner_c16","0", []), ("banner_c17",0,"pw_banner_c17","0", []), ("banner_c18",0,"pw_banner_c18","0", []), ("banner_c19",0,"pw_banner_c19","0", []), ("banner_c20",0,"pw_banner_c20","0", []), ("banner_c21",0,"pw_banner_c21","0", []), ("banner_d01",0,"pw_banner_d01","0", []), ("banner_d02",0,"pw_banner_d02","0", []), ("banner_d03",0,"pw_banner_d03","0", []), ("banner_d04",0,"pw_banner_d04","0", []), ("banner_d05",0,"pw_banner_d05","0", []), ("banner_d06",0,"pw_banner_d06","0", []), ("banner_d07",0,"pw_banner_d07","0", []), ("banner_d08",0,"pw_banner_d08","0", []), ("banner_d09",0,"pw_banner_d09","0", []), 
("banner_d10",0,"pw_banner_d10","0", []), ("banner_d11",0,"pw_banner_d11","0", []), ("banner_d12",0,"pw_banner_d12","0", []), ("banner_d13",0,"pw_banner_d13","0", []), ("banner_d14",0,"pw_banner_d14","0", []), ("banner_d15",0,"pw_banner_d15","0", []), ("banner_d16",0,"pw_banner_d16","0", []), ("banner_d17",0,"pw_banner_d17","0", []), ("banner_d18",0,"pw_banner_d18","0", []), ("banner_d19",0,"pw_banner_d19","0", []), ("banner_d20",0,"pw_banner_d20","0", []), ("banner_d21",0,"pw_banner_d21","0", []), ("banner_e01",0,"pw_banner_e01","0", []), ("banner_e02",0,"pw_banner_e02","0", []), ("banner_e03",0,"pw_banner_e03","0", []), ("banner_e04",0,"pw_banner_e04","0", []), ("banner_e05",0,"pw_banner_e05","0", []), ("banner_e06",0,"pw_banner_e06","0", []), ("banner_e07",0,"pw_banner_e07","0", []), ("banner_e08",0,"pw_banner_e08","0", []), ("banner_e09",0,"pw_banner_e09","0", []), ("banner_e10",0,"pw_banner_e10","0", []), ("banner_e11",0,"pw_banner_e11","0", []), ("banner_e12",0,"pw_banner_e12","0", []), ("banner_e13",0,"pw_banner_e13","0", []), ("banner_e14",0,"pw_banner_e14","0", []), ("banner_e15",0,"pw_banner_e15","0", []), ("banner_e16",0,"pw_banner_e16","0", []), ("banner_e17",0,"pw_banner_e17","0", []), ("banner_e18",0,"pw_banner_e18","0", []), ("banner_e19",0,"pw_banner_e19","0", []), ("banner_e20",0,"pw_banner_e20","0", []), ("banner_e21",0,"pw_banner_e21","0", []), ("banner_f01",0,"pw_banner_f01","0", []), ("banner_f02",0,"pw_banner_f02","0", []), ("banner_f03",0,"pw_banner_f03","0", []), ("banner_f04",0,"pw_banner_f04","0", []), ("banner_f05",0,"pw_banner_f05","0", []), ("banner_f06",0,"pw_banner_f06","0", []), ("banner_f07",0,"pw_banner_f07","0", []), ("banner_f08",0,"pw_banner_f08","0", []), ("banner_f09",0,"pw_banner_f09","0", []), ("banner_f10",0,"pw_banner_f10","0", []), ("banner_f11",0,"pw_banner_f11","0", []), ("banner_f12",0,"pw_banner_f12","0", []), ("banner_f13",0,"pw_banner_f13","0", []), ("banner_f14",0,"pw_banner_f14","0", []), 
("banner_f15",0,"pw_banner_f15","0", []), ("banner_f16",0,"pw_banner_f16","0", []), ("banner_f17",0,"pw_banner_f17","0", []), ("banner_f18",0,"pw_banner_f18","0", []), ("banner_f19",0,"pw_banner_f19","0", []), ("banner_f20",0,"pw_banner_f20","0", []), ("banner_f21",0,"pw_banner_f21","0", []), ("banner_g01",0,"pw_banner_g01","0", []), ("banner_g02",0,"pw_banner_g02","0", []), ("banner_g03",0,"pw_banner_g03","0", []), ("banner_g04",0,"pw_banner_g04","0", []), ("banner_g05",0,"pw_banner_g05","0", []), ("banner_g06",0,"pw_banner_g06","0", []), ("banner_g07",0,"pw_banner_g07","0", []), ("banner_g08",0,"pw_banner_g08","0", []), ("banner_g09",0,"pw_banner_g09","0", []), ("banner_g10",0,"pw_banner_g10","0", []), ("banner_g11",0,"pw_banner_g11","0", []), ("banner_g12",0,"pw_banner_g12","0", []), ("banner_g13",0,"pw_banner_g13","0", []), ("banner_g14",0,"pw_banner_g14","0", []), ("banner_g15",0,"pw_banner_g15","0", []), ("banner_g16",0,"pw_banner_g16","0", []), ("banner_g17",0,"pw_banner_g17","0", []), ("banner_g18",0,"pw_banner_g18","0", []), ("banner_g19",0,"pw_banner_g19","0", []), ("banner_g20",0,"pw_banner_g20","0", []), ("banner_g21",0,"pw_banner_g21","0", []), ("banner_h01",0,"pw_banner_h01","0", []), ("banner_h02",0,"pw_banner_h02","0", []), ("banner_h03",0,"pw_banner_h03","0", []), ("banner_h04",0,"pw_banner_h04","0", []), ("banner_h05",0,"pw_banner_h05","0", []), ("banner_h06",0,"pw_banner_h06","0", []), ("banner_h07",0,"pw_banner_h07","0", []), ("banner_h08",0,"pw_banner_h08","0", []), ("banner_h09",0,"pw_banner_h09","0", []), ("banner_h10",0,"pw_banner_h10","0", []), ("banner_h11",0,"pw_banner_h11","0", []), ("banner_h12",0,"pw_banner_h12","0", []), ("banner_h13",0,"pw_banner_h13","0", []), ("banner_h14",0,"pw_banner_h14","0", []), ("banner_h15",0,"pw_banner_h15","0", []), ("banner_h16",0,"pw_banner_h16","0", []), ("banner_h17",0,"pw_banner_h17","0", []), ("banner_h18",0,"pw_banner_h18","0", []), ("banner_h19",0,"pw_banner_h19","0", []), 
("banner_h20",0,"pw_banner_h20","0", []), ("banner_h21",0,"pw_banner_h21","0", []), ("banner_i01",0,"pw_banner_i01","0", []), ("banner_i02",0,"pw_banner_i02","0", []), ("banner_i03",0,"pw_banner_i03","0", []), ("banner_i04",0,"pw_banner_i04","0", []), ("banner_i05",0,"pw_banner_i05","0", []), ("banner_i06",0,"pw_banner_i06","0", []), ("banner_i07",0,"pw_banner_i07","0", []), ("banner_i08",0,"pw_banner_i08","0", []), ("banner_i09",0,"pw_banner_i09","0", []), ("banner_i10",0,"pw_banner_i10","0", []), ("banner_i11",0,"pw_banner_i11","0", []), ("banner_i12",0,"pw_banner_i12","0", []), ("banner_i13",0,"pw_banner_i13","0", []), ("banner_i14",0,"pw_banner_i14","0", []), ("banner_i15",0,"pw_banner_i15","0", []), ("banner_i16",0,"pw_banner_i16","0", []), ("banner_i17",0,"pw_banner_i17","0", []), ("banner_i18",0,"pw_banner_i18","0", []), ("banner_i19",0,"pw_banner_i19","0", []), ("banner_i20",0,"pw_banner_i20","0", []), ("banner_i21",0,"pw_banner_i21","0", []), ("banner_j01",0,"pw_banner_j01","0", []), ("banner_j02",0,"pw_banner_j02","0", []), ("banner_j03",0,"pw_banner_j03","0", []), ("banner_j04",0,"pw_banner_j04","0", []), ("banner_j05",0,"pw_banner_j05","0", []), ("banner_j06",0,"pw_banner_j06","0", []), ("banner_j07",0,"pw_banner_j07","0", []), ("banner_j08",0,"pw_banner_j08","0", []), ("banner_j09",0,"pw_banner_j09","0", []), ("banner_j10",0,"pw_banner_j10","0", []), ("banner_j11",0,"pw_banner_j11","0", []), ("banner_j12",0,"pw_banner_j12","0", []), ("banner_j13",0,"pw_banner_j13","0", []), ("banner_j14",0,"pw_banner_j14","0", []), ("banner_j15",0,"pw_banner_j15","0", []), ("banner_j16",0,"pw_banner_j16","0", []), ("banner_j17",0,"pw_banner_j17","0", []), ("banner_j18",0,"pw_banner_j18","0", []), ("banner_j19",0,"pw_banner_j19","0", []), ("banner_j20",0,"pw_banner_j20","0", []), ("banner_j21",0,"pw_banner_j21","0", []), ("banner_k01",0,"pw_banner_k01","0", []), ("banner_k02",0,"pw_banner_k02","0", []), ("banner_k03",0,"pw_banner_k03","0", []), 
("banner_k04",0,"pw_banner_k04","0", []), ("banner_k05",0,"pw_banner_k05","0", []), ("banner_k06",0,"pw_banner_k06","0", []), ("banner_k07",0,"pw_banner_k07","0", []), ("banner_k08",0,"pw_banner_k08","0", []), ("banner_k09",0,"pw_banner_k09","0", []), ("banner_k10",0,"pw_banner_k10","0", []), ("banner_k11",0,"pw_banner_k11","0", []), ("banner_k12",0,"pw_banner_k12","0", []), ("banner_k13",0,"pw_banner_k13","0", []), ("banner_k14",0,"pw_banner_k14","0", []), ("banner_k15",0,"pw_banner_k15","0", []), ("banner_k16",0,"pw_banner_k16","0", []), ("banner_k17",0,"pw_banner_k17","0", []), ("banner_k18",0,"pw_banner_k18","0", []), ("banner_k19",0,"pw_banner_k19","0", []), ("banner_k20",0,"pw_banner_k20","0", []), ("banner_k21",0,"pw_banner_k21","0", []), ("banner_l01",0,"pw_banner_l01","0", []), ("banner_l02",0,"pw_banner_l02","0", []), ("banner_l03",0,"pw_banner_l03","0", []), ("banner_l04",0,"pw_banner_l04","0", []), ("banner_l05",0,"pw_banner_l05","0", []), ("banner_l06",0,"pw_banner_l06","0", []), ("banner_l07",0,"pw_banner_l07","0", []), ("banner_l08",0,"pw_banner_l08","0", []), ("banner_l09",0,"pw_banner_l09","0", []), ("banner_l10",0,"pw_banner_l10","0", []), ("banner_l11",0,"pw_banner_l11","0", []), ("banner_l12",0,"pw_banner_l12","0", []), ("banner_l13",0,"pw_banner_l13","0", []), ("banner_l14",0,"pw_banner_l14","0", []), ("banner_l15",0,"pw_banner_l15","0", []), ("banner_l16",0,"pw_banner_l16","0", []), ("banner_l17",0,"pw_banner_l17","0", []), ("banner_l18",0,"pw_banner_l18","0", []), ("banner_l19",0,"pw_banner_l19","0", []), ("banner_l20",0,"pw_banner_l20","0", []), ("banner_l21",0,"pw_banner_l21","0", []), ("banner_m01",0,"pw_banner_m01","0", []), ("banner_m02",0,"pw_banner_m02","0", []), ("banner_m03",0,"pw_banner_m03","0", []), ("banner_m04",0,"pw_banner_m04","0", []), ("banner_m05",0,"pw_banner_m05","0", []), ("banner_m06",0,"pw_banner_m06","0", []), ("banner_m07",0,"pw_banner_m07","0", []), ("banner_m08",0,"pw_banner_m08","0", []), 
("banner_m09",0,"pw_banner_m09","0", []), ("banner_m10",0,"pw_banner_m10","0", []), ("banner_m11",0,"pw_banner_m11","0", []), ("banner_m12",0,"pw_banner_m12","0", []), ("banner_m13",0,"pw_banner_m13","0", []), ("banner_m14",0,"pw_banner_m14","0", []), ("banner_m15",0,"pw_banner_m15","0", []), ("banner_m16",0,"pw_banner_m16","0", []), ("banner_m17",0,"pw_banner_m17","0", []), ("banner_m18",0,"pw_banner_m18","0", []), ("banner_m19",0,"pw_banner_m19","0", []), ("banner_m20",0,"pw_banner_m20","0", []), ("banner_m21",0,"pw_banner_m21","0", []), ("banner_n01",0,"pw_banner_n01","0", []), ("banner_n02",0,"pw_banner_n02","0", []), ("banner_n03",0,"pw_banner_n03","0", []), ("banner_n04",0,"pw_banner_n04","0", []), ("banner_n05",0,"pw_banner_n05","0", []), ("banner_n06",0,"pw_banner_n06","0", []), ("banner_n07",0,"pw_banner_n07","0", []), ("banner_n08",0,"pw_banner_n08","0", []), ("banner_n09",0,"pw_banner_n09","0", []), ("banner_n10",0,"pw_banner_n10","0", []), ("banner_n11",0,"pw_banner_n11","0", []), ("banner_n12",0,"pw_banner_n12","0", []), ("banner_n13",0,"pw_banner_n13","0", []), ("banner_n14",0,"pw_banner_n14","0", []), ("banner_n15",0,"pw_banner_n15","0", []), ("banner_n16",0,"pw_banner_n16","0", []), ("banner_n17",0,"pw_banner_n17","0", []), ("banner_n18",0,"pw_banner_n18","0", []), ("banner_n19",0,"pw_banner_n19","0", []), ("banner_n20",0,"pw_banner_n20","0", []), ("banner_n21",0,"pw_banner_n21","0", []), ("banner_kingdom_a", 0, "pw_banner_kingdom_a", "0", []), ("banner_kingdom_b", 0, "pw_banner_kingdom_b", "0", []), ("banner_kingdom_c", 0, "pw_banner_kingdom_c", "0", []), ("banner_kingdom_d", 0, "pw_banner_kingdom_d", "0", []), ("banner_kingdom_e", 0,
#!/usr/bin/env python3
__version__ = "0.1.4"

import argparse
import operator
import logging
import gzip
import sys
from stringmeup import taxonomy
from dataclasses import dataclass
from os import path

logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    level=logging.INFO,
    datefmt='%Y-%m-%d [%H:%M:%S]')
log = logging.getLogger(path.basename(__file__))

# TODO: make sure confidence_threshold is between 0 and 1
# TODO: For the verbose output, also output (1) the number of kmers that hit in total, (2) the number of non-ambiguous kmers (queried).


@dataclass
class ReadClassification:
    """Mutable record of one read's original and recalculated classification.

    Fields default to None/False and are filled in incrementally while the
    read is re-evaluated against the confidence threshold.
    """
    current_node: int = None
    original_conf: float = None
    recalculated_conf: float = None
    original_taxid: int = None
    reclassified_taxid: int = None
    original_rank_code: str = None
    reclassified_rank_code: str = None
    original_name: str = None
    reclassified_name: str = None
    reclassified_distance: int = None
    id: str = None
    length: str = None
    kmer_string: str = None
    classified: bool = False
    # NOTE(review): the two attributes below have no type annotation, so the
    # dataclass machinery treats them as plain class attributes (not fields).
    # Instance code assigns them directly (read.max_confidence = ...), which
    # works, but they will not appear in __init__/__repr__ — confirm intended.
    max_confidence = None
    minimizer_hit_groups = None


@dataclass
class ReportNode:
    """One row of a Kraken 2-style report (clade/node hit counts per taxon)."""
    ratio: str
    hits_at_clade: int
    hits_at_node: int
    rank_code: str
    rank_depth: int
    node_taxid: int
    name: str
    offset: int


def validate_input_file(putative_classifications_file, verbose_input, minimum_hit_groups, paired_input):
    """
    Perform simple validation of the input file.

    Reads only the first line of the (possibly gzipped) Kraken 2 output file
    and checks: the expected column count (5, or 6 for verbose output),
    that the line starts with 'C' or 'U', and that the read-length and
    kmer-string columns look sane for paired/single-end data.
    Exits the program via sys.exit() when the file is missing or malformatted.
    """
    log.debug('Validating input classifications file.')

    if not path.isfile(putative_classifications_file):
        log.error('Cannot find the specified file ({file}).'.format(
            file=putative_classifications_file))
        sys.exit()

    # read_file() is defined elsewhere in this module; presumably it opens
    # plain or gzipped text transparently — confirm against its definition.
    with read_file(putative_classifications_file) as f:
        line = f.readline()
        line_proc = line.strip()
        line_proc = line_proc.split('\t')

    # The following should be the case of a Kraken 2 output file
    # First, check so the number of columns in the input file conforms to the expected number
    if not verbose_input:
        num_cols = len(line_proc) == 5  # original type of kraken2 output file
    else:
        num_cols = len(line_proc) == 6  # 6 columns if the output was produced with the verbose version of kraken2 that outputs minimizer hit groups

    # Line must start with C or U (as in Classified/unclassified)
    line_start = line_proc[0] in ['U', 'C']

    # If the data is paired
    if paired_input:
        # Must be information on both sides of the pipe character
        data_col_1 = len(line_proc[3].split('|')) == 2

        # If the data is paired in the 3rd column, it must also be paired in the last column
        if "|" in line_proc[-1]:
            data_col_2 = len(line_proc[-1].split('|:|')) == 2
        else:
            data_col_2 = False

    # If the input is from single end reads, atleast the read length column (3rd) must be an int
    else:
        try:
            int(line_proc[3])
        # was a bare "except:", which would also swallow SystemExit /
        # KeyboardInterrupt; int() on a str can only raise ValueError here.
        except ValueError:
            data_col_1 = False
        else:
            data_col_1 = True

        # And the last column should contain colons between kmer/taxon pairs
        # NOTE(review): index 4 is only the last column for non-verbose input;
        # for 6-column verbose input the kmer string is column 5 — confirm.
        if ":" in line_proc[4]:
            data_col_2 = True
        else:
            data_col_2 = False

    if num_cols and line_start and data_col_1 and data_col_2:
        log.debug('Validation OK.')
        return
    else:
        log.error('The classifications file is malformatted.')
        log.debug('First line of input: {}'.format(line))
        log.debug('num_cols: {}'.format(num_cols))
        log.debug('line_start: {}'.format(line_start))
        log.debug('data_col_1: {}'.format(data_col_1))
        # Fixed copy-paste bug: this line previously logged data_col_2's
        # value under the label "data_col_1".
        log.debug('data_col_2: {}'.format(data_col_2))
        sys.exit()


def is_paired_input(classifications_file):
    """
    Returns true if input file appears to contain paired read data.
    """
    with read_file(classifications_file) as f:
        line = f.readline()
        line_proc = line.strip()
        line_proc = line_proc.split('\t')

    # If column 4 contains a pipe character "|", the data is paired
    if "|" in line_proc[3]:
        return True
    # Previously fell off the end returning None; an explicit False keeps
    # truthiness identical while making the contract clear.
    return False


def is_verbose_input(classifications_file):
    """
    Returns true if input file consists of 6 columns instead of 5.
    """
    with read_file(classifications_file) as f:
        line = f.readline()
        line_proc = line.strip()
        line_proc = line_proc.split('\t')

    if len(line_proc) == 6:
        return True
    else:
        return False


def process_kmer_string(kmer_info_string, paired_input):
    """
    Process a kmer info string (last column of a Kraken 2 output file), so
    that we get a dictionary mapping of tax_ids to total sum of kmer hits.

    Returns:
    {tax_id_#1: X kmer hits,
     tax_id_#2: Y kmer hits,
     ...
     tax_id_#N: Z kmer hits}
    """
    kmer_info_string = kmer_info_string.split()

    # Kraken2 classifications file for paired data contain the "|:|" delimiter
    if paired_input:
        kmer_info_string.remove('|:|')

    # Messy list comprehension. Converts all "taxa":"num_kmer" string pairs
    # into integer tuples like (taxa, num_kmers), and saves them in a list.
    # Ambiguous kmers are not processed (discarded).
    kmer_classifications = [
        (int(x[0]), int(x[1])) for x in (
            kmer_info.split(':') for kmer_info in kmer_info_string)
        if x[0] != 'A']

    # Further processes the (taxa, num_kmers) tuples into a dict where each
    # tax_id stores the total sum of kmer hits to that tax_id.
    taxa_kmer_dict = {}
    for kmer_info in kmer_classifications:
        if kmer_info[0] not in taxa_kmer_dict:
            taxa_kmer_dict[kmer_info[0]] = kmer_info[1]
        else:
            taxa_kmer_dict[kmer_info[0]] += kmer_info[1]

    return taxa_kmer_dict


def reclassify_read(read, confidence_threshold, taxonomy_tree, verbose_input, minimum_hit_groups, taxa_lineages, paired_input):
    """
    Sums the number of kmers that hit in the clade rooted at "current_node",
    and divides it with the total number of kmers queried against the
    database:
        confidence = clade_kmer_hits / total_kmer_hits
    If the confidence at a specific node is < confidence_threshold, we go one
    step up the taxonomy (to the parent node) and recalculates the confidence.
    This is repeated until confidence >= confidence_threshold.

    In this function it's envisionable to include other parameters for the
    classification... Right now I'm only considering the confidence score
    and minimum hit groups.
    """
    # Process the kmer string into a dict of {tax_id: #kmers} key, value pairs
    taxa_kmer_dict = process_kmer_string(read.kmer_string, paired_input)

    # Make the current node the same as the original classification
    read.current_node = read.original_taxid

    # The total number of kmers that were interrogated against the
    # database (non-ambiguous):
    total_kmer_hits = sum(taxa_kmer_dict.values())

    # Only interested in tax_ids that are in the database. A '0' signifies that
    # the kmer could not be assigned to any tax_id (missing from database).
    assigned_taxa_set = set(
        [tax_id for tax_id in taxa_kmer_dict.keys() if tax_id != 0])

    # Make a quick check to see if it is even possible to obtain the confidence
    # needed to make a classification. If it isn't we don't have to go through
    # the hassle of calculating the confidence at all parent nodes. Potentially
    # saving us a lot of time.
    doomed_to_fail = False
    total_hits = sum(taxa_kmer_dict[tax_id] for tax_id in assigned_taxa_set)
    max_confidence = total_hits / total_kmer_hits
    read.max_confidence = max_confidence

    # The read can't achieve a confidence high enough, so we mark it
    if max_confidence < confidence_threshold:
        doomed_to_fail = True

    # Filter minimizer_hit_groups
    if verbose_input:
        if read.minimizer_hit_groups < minimum_hit_groups:
            doomed_to_fail = True

    # The nr of kmers that hit within the clade rooted at the current node:
    num_hits_within_clade = 0

    while not read.classified:
        taxa_in_clade = set()

        # For each tax_id that kmers in the read were assigned to:
        for tax_id in assigned_taxa_set:

            # Get the lineage (all ancestors including itself) for the tax_id:
            if tax_id in taxa_lineages:
                lineage = taxa_lineages[tax_id]
            else:
                lineage = taxonomy_tree.get_lineage([tax_id])[tax_id]

                # Save lineage so we don't have to get it from taxonomy_tree
                # more than once. Also make it into a set, which is faster to
                # query (the order of tax_ids in the lineage is not important
                # here).
                taxa_lineages[tax_id] = set(lineage)

            # If the currently classified (read.current_node) tax_id is in the
            # lineage (parents) of tax_id, then tax_id must be in the clade
            # rooted at read.current_node - i.e. tax_id is a descendant of
            # current_node.
            if read.current_node in lineage:

                # There is no need to get the lineage of tax_id in future
                # iterations since it will always be in the clade rooted at
                # read.current_node (we only ever go up in the taxonomy).
                # Remember which tax_ids we have counted, so we can remove them
                # from the set, outside of the loop:
                taxa_in_clade.add(tax_id)

                # Instead, we just add the kmers that hit tax_id to the total
                # hits at the clade:
                num_hits_within_clade += taxa_kmer_dict[tax_id]

        # Remove the already counted tax_ids:
        if taxa_in_clade:
            assigned_taxa_set -= taxa_in_clade

        # The confidence value for the read pair classification at the current
        # taxonomic level:
        read.recalculated_conf = num_hits_within_clade / total_kmer_hits

        # Set the original confidence score
        if not read.original_conf:
            read.original_conf = read.recalculated_conf

        # If we can't achieve the confidence score cutoff, now is the time
        # to exit the loop and return the read (since we have calculated
        # the original confidence).
        if doomed_to_fail:
            read.recalculated_conf = max_confidence
            read.reclassified_taxid = 0
            return read, taxa_lineages

        # If the confidence at this node is sufficient, we classify it to
        # the current node (TaxID).
        if read.recalculated_conf >= confidence_threshold:
            read.classified = True
            read.reclassified_taxid = read.current_node
            return read, taxa_lineages

        # If the current node is the root, we stop (can't go higher up in the
import csv import glob import hashlib import io import json import logging import os import random import sys import time from collections import defaultdict, Counter from datetime import datetime, timedelta from uuid import uuid4 from web.logger import WebLogger from web.mturk import MTurkClient from web.utils import SourcingState, UserStatus, generate_csv COLLECTION_MODES = ["plain", "adv", "validation"] random.seed(42) class Dataset(object): def __init__(self, items): self.items = items self.item_ids = list(self.items.keys()) self.indicator = 0 def __getitem__(self, val): if isinstance(val, str): return self.items[val] elif isinstance(val, slice): return self.item_ids[val] else: raise ValueError(f"unknown variable type for __getitem__: {type(val)}") def get_next_data(self, size): return [self.items[val] for val in self.get_next_data_ids(size)] def get_next_data_ids(self, size): return_data = None if size >= len(self.item_ids): return_data = self.item_ids * (size // len(self.item_ids)) next_index = size % len(self.item_ids) return_data += self.item_ids[self.indicator : self.indicator + next_index] self.indicator += next_index self.logger.info("writing item_ids iterated") elif size + self.indicator > len(self.item_ids): return_data = self.item_ids[self.indicator :] next_index = size - len(return_data) return_data = self.item_ids[:next_index] self.indicator = next_index self.logger.info("writing item_ids iterated") else: # size + self.indicator <= len(self.item_ids): return_data = self.item_ids[self.indicator : self.indicator + size] self.indicator += size self.indicator %= len(self.item_ids) return return_data class Backend(object): @classmethod def get_backend(cls, app): backend = getattr(app.state, "_backend", None) if backend is None: app.state._backend = cls( app.state.args.log_file, app.state.args.save_dir, app.state.args.run_mode, app.state.mturk_config, app.state.args.hit_config, app.state.args.resume, annotation=app.state.args.annotation, 
worker_log_path=app.state.args.worker_log_path, max_acceptable_hit_num=app.state.args.max_acceptable_hit_num, # collected_passage_path=app.state.args.collected_passage_path, ) return app.state._backend def __init__( self, log_path, save_dir, run_mode, mturk_config, hit_config, resume=False, annotation="", worker_log_path="", max_acceptable_hit_num=1, # collected_passage_path=None, ): self.logger = WebLogger.set_new_loghandler(log_path) # stdout_handler = logging.StreamHandler(sys.stdout) # self.logger.addHandler(logging.StreamHandler(sys.stdout)) self.run_mode = run_mode self.hit_config = hit_config self.dataset, self.tasks = {}, {} self.qualification_terminal_action = None for mode in COLLECTION_MODES: if mode not in self.hit_config: continue mode_config = self.hit_config[mode] if mode in ["plain", "adv"]: dataset = json.load(open(mode_config["data_path"])) if self.run_mode == "debug": assignment_num = 10 else: assignment_num = ( mode_config["hit_num"] * mode_config["max_assignments"] ) self.dataset[mode] = {d["id"]: d["text"] for d in sum(dataset, [])} assert len(dataset) >= assignment_num for d in dataset: assert len(d) == mode_config["passage_num"] self.tasks[mode] = dataset elif mode == "validation": self.tasks[mode] = json.load(open(hit_config[mode]["data_path"])) self.dataset[mode] = { v["question_id"]: v for v in sum(self.tasks[mode], []) } if "grant_qualification_id" in hit_config[mode]: qual_id = hit_config[mode]["grant_qualification_id"] qual_score = hit_config[mode]["grant_qualification_score"] qual_message = hit_config[mode].get("grant_qualification_message", None) qual_threshold = hit_config[mode]["accuracy_for_qualification"] self.qualification_terminal_action = { "action": "qualification", "qualification_id": qual_id, "qualification_score": qual_score, "qualification_message": qual_message, "threshold": qual_threshold, } else: raise ValueError("unknown collection mode in COLLECTION_MODES") # cache of created data {hit_uid: {worker_id: 
assignment_uid}} self.available_user_assignments = defaultdict(list) # worker to list of validated question_ids self.worker_validation_history = defaultdict(list) self.worker_gold_validation_history = defaultdict(list) # sanity check of validation self.save_dir = save_dir self.user_assignments = {} self.annotation = annotation if annotation != "" else run_mode self.max_acceptable_hit_num = max_acceptable_hit_num if worker_log_path and os.path.exists(worker_log_path): self.worker_log_path = worker_log_path worker_data = json.load(open(worker_log_path, "r")) self.worker_log = defaultdict(list, worker_data) # self.logger.info(f"Registered worker_id: {len(self.worker_log)}") print(f"Registered worker_id: {len(self.worker_log)}") elif worker_log_path and not os.path.exists(worker_log_path): self.worker_log_path = worker_log_path self.worker_log = defaultdict(list) else: self.worker_log_path = None self.worker_log = None self.mturk_client = None if resume: self.resume_existing_save(save_dir) self.mturk_client = MTurkClient( mturk_config["access_id"], mturk_config["secret_key"], mturk_config["base_url"], run_mode=run_mode, ) return # issue co-occurrent HITs first if exists # they are assumed to require qualification if self.run_mode in ["sandbox", "release"]: self.mturk_client = MTurkClient( mturk_config["access_id"], mturk_config["secret_key"], mturk_config["base_url"], run_mode=run_mode, ) if self.qualification_terminal_action: self.mturk_client.show_qualification_type_stats( self.qualification_terminal_action["qualification_id"] ) self.mturk_client.expire_leftover_hits() for mode, mode_config in self.hit_config.items(): collection_mode = mode_config["collection_mode"] if run_mode in ["sandbox", "release"]: self.mturk_client.set_hit_config( collection_mode=collection_mode, template_path=mode_config.get( "hit_template", mturk_config["template_path"] ), passage_num=mode_config["passage_num"], title=mode_config.get("title", None), 
max_assignments=mode_config["max_assignments"], hit_cost=mode_config["hit_cost"], assignment_duration=mode_config["assignment_duration"], lifetime=mode_config["lifetime"], qualification_id=mode_config.get("qualification_id", "default"), qualification_threshold=mode_config.get( "qualification_threshold", 50 ), exclude_qualification_id=mode_config.get( "grant_qualification_id", None ), # workers cannot see this HIT if they have this qualification ) hit_uids = self.generate_uids(mode_config["hit_num"]) if mode_config["hit_num"] > len(self.tasks[collection_mode]): raise ValueError("Specified HIT number exceeds the dataset size.") uid_to_hit = self.mturk_client.issue_hits( mode_config["hit_num"], hit_uids, ) elif run_mode == "debug": hit_uids = self.generate_uids(mode_config.get("hit_num", 10)) uid_to_hit = self.issue_dummy_hits(hit_uids) assign_num = self.setup_user_assignments( uid_to_hit, collection_mode, mode_config["max_assignments"], ) print(f"{assign_num} assignments are generated") # urls for easy debug if run_mode == "debug": parent_uids = self.get_parent_hit_uids() for i, uid in enumerate(parent_uids): for j in range(3): frags = [ f"[{i}-{j}] {mturk_config['base_url']}?uid={uid}", f"worker_id=XXXXX{i}{j}", f"assignment_id=YYYYY{i}{j}", "status=new", ] print("&".join(frags)) if run_mode == "sandbox": parent_uids = self.get_parent_hit_uids() for i, uid in enumerate(parent_uids): for j in range(3): frags = [ f"[{i}-{j}] {mturk_config['base_url']}?uid={uid}", f"worker_id=XXXXX{i}{j}", f"assignment_id=YYYYY{i}{j}", "status=new", ] print("&".join(frags)) def get_parent_hit_uids(self): return [ uid for uid, assignment in self.user_assignments.items() if assignment.annotation == "parent_hit" ] def resume_existing_save(self, save_dir): files = glob.glob(os.path.join(save_dir, "*.json")) for file in files: data = json.load(open(file, "r")) if data.get("annotation", "") == "parent_hit": user_data = UserStatus() user_data.__dict__ = data 
self.user_assignments[user_data.uid] = user_data continue if "state" not in data: continue data["state"] = SourcingState[data["state"]] user_data = UserStatus() user_data.__dict__ = data self.user_assignments[user_data.uid] = user_data # reconstruct worker_validation_history if self.collection_mode == "validation" and self.user_assignments[user_data.uid] == "FINISHED": question_ids = [v['question_id'] for v in self.user_assignments[user_data.uid].validation_data] worker_id = self.user_assignments[user_data.uid].worker_id self.worker_validation_history[worker_id].append(question_ids) for v in self.user_assignments[user_data.uid].validation_data: if "GOLD" in v['question_id']: self.worker_gold_validation_history[worker_id].append((v['question_id'], v['correct'])) def generate_uids(self, user_num): return [uuid4().hex for i in range(user_num)] def issue_dummy_hits(self, uids): return { uid: {"hit_id": uuid4().hex, "completion_code": uuid4().hex} for uid in uids } def check_if_known_worker_id(self, worker_id): return worker_id in self.worker_log def check_if_worker_can_accept_hit(self, worker_id, uid): self.logger.info( f"worker_id: {worker_id}={len(self.worker_log[worker_id])} hits done" ) return len(self.worker_log[worker_id]) < self.max_acceptable_hit_num def restore_expired_user_assignments(self, master_hit_uid): reference_time = "current_time" earliest_uid = None earliest_start_datetime = None restored_counter = 0 for uid, user_assignment in self.user_assignments.items(): if user_assignment.parent_hit_uid != master_hit_uid: continue if self.get_current_state(uid) == SourcingState.WAIT: self.available_user_assignments[master_hit_uid].append(uid) restored_counter += 1 continue if self.get_current_state(uid) in [ SourcingState.FINISHED, SourcingState.QUIT, ]: continue to_restore = False start_datetime = datetime.fromisoformat(user_assignment.start_time) if reference_time == "start_time": mode_config = self.hit_config[user_assignment.collection_mode] assign_duration = 
mode_config.get("assignment_duration", 60 * 30) if datetime.now() >= start_datetime + timedelta( seconds=assign_duration ): to_restore = True else: # the datetime to see if the assignment is still alive or not current_time = datetime.fromisoformat(user_assignment.current_time) print( f"working check: now/latest = {datetime.now()}/{current_time}+30s" ) # refresh every 10 sec in javascript if datetime.now() >= current_time + timedelta(seconds=30): to_restore = True if to_restore: self.set_current_state(uid, SourcingState.WAIT) self.available_user_assignments[master_hit_uid].append(uid) restored_counter += 1 if earliest_uid is None or earliest_start_datetime > start_datetime: earliest_uid = uid earliest_start_datetime = start_datetime if len(self.available_user_assignments[master_hit_uid]) == 0: if earliest_uid is not None: # get earliest task in case no available hits print(f"restore earliest_uid: {earliest_uid}") self.available_user_assignments[master_hit_uid].append(earliest_uid) restored_counter += 1 self.logger.info(f"restored {restored_counter} assignments") # print(f"restored {restored_counter} assignments") def get_available_user_assignment(self, master_hit_uid): if len(self.available_user_assignments[master_hit_uid]) == 0: self.restore_expired_user_assignments(master_hit_uid) if len(self.available_user_assignments[master_hit_uid]) > 0: return self.available_user_assignments[master_hit_uid].pop(0) else: return None def check_user_data(self, uid, status=None, worker_id=None, assignment_id=None): if uid not in self.user_assignments: return None if self.user_assignments[uid].annotation == "parent_hit": child_uids = self.user_assignments[uid].comment.split(",") hit_uid = uid for child_uid in child_uids: if worker_id == self.user_assignments[child_uid].worker_id: return child_uid uid = self.get_available_user_assignment(hit_uid) if uid is None: return None if status == "new" or self.run_mode == "debug": self.init_user_assignments(uid, worker_id, assignment_id) 
return uid def setup_user_assignments(self, uid_to_hit, collection_mode, max_assignments): new_uid_to_hit = {} for hit_uid, hit in uid_to_hit.items(): assignment_uids = self.generate_uids(max_assignments) self.user_assignments[hit_uid] = UserStatus( uid=hit_uid, hit_id=hit["hit_id"], annotation="parent_hit", collection_mode=collection_mode, comment=",".join(assignment_uids), ) hit["uid"] = hit_uid self.user_assignments[hit_uid].save_to(self.save_dir) for assignment_uid in assignment_uids: new_uid_to_hit[assignment_uid] = hit self.available_user_assignments[hit_uid] = assignment_uids uid_to_hit = new_uid_to_hit if collection_mode in ["plain", "adv"]: for uid, hit in uid_to_hit.items(): tasks = self.tasks[collection_mode].pop() passages = [task["text"] for task in tasks] passage_ids = [task["id"] for task in tasks] task_ids = [f"{task['id']}_{collection_mode}" for task in tasks] terminal_actions = self.get_terminal_actions(collection_mode) self.user_assignments[uid] = UserStatus( uid=uid, passage_ids=passage_ids, passages=passages, task_ids=task_ids, state=SourcingState.WAIT, hit_id=hit["hit_id"], parent_hit_uid=hit["uid"], completion_code=hit["completion_code"], current_task_index=0, annotation=self.annotation, collection_mode=collection_mode, terminal_actions=terminal_actions, ) self.user_assignments[uid].save_to(self.save_dir) elif collection_mode in ["validation"]: for uid, hit in uid_to_hit.items(): self.user_assignments[uid] = UserStatus( uid=uid, state=SourcingState.WAIT, hit_id=hit["hit_id"], parent_hit_uid=hit["uid"], completion_code=hit["completion_code"], current_task_index=0, annotation=self.annotation, collection_mode=collection_mode, ) self.user_assignments[uid].save_to(self.save_dir) return len(uid_to_hit) def init_user_assignments(self, uid, worker_id=None, assignment_id=None): if self.user_assignments[uid].assignment_id: self.user_assignments[uid].save_to(self.save_dir) # prev_assignment = self.user_assignments[uid] current_time = 
datetime.now().isoformat() self.user_assignments[uid].start_time = current_time self.user_assignments[uid].current_time = current_time self.user_assignments[uid].current_task_index = 0 self.user_assignments[uid].state = SourcingState.START self.user_assignments[uid].worker_id = worker_id self.user_assignments[uid].assignment_id = assignment_id self.user_assignments[uid].save_to(self.save_dir) def update_current_time(self, uid): current_time = datetime.now().isoformat() if uid in self.user_assignments: prev_time = self.user_assignments[uid].current_time self.user_assignments[uid].current_time = current_time self.user_assignments[uid].save_to(self.save_dir) self.logger.info(f"Working: {uid} {current_time} <- {prev_time}") else: self.logger.info(f"Update failed: {uid}") def get_current_state(self, uid): if uid in self.user_assignments: return self.user_assignments[uid].state else: return None def set_current_state(self, uid, state): self.user_assignments[uid].state = state def finalize_user_assignment(self, uid): if self.user_assignments[uid].finished_time: self.set_current_state(uid, SourcingState.FINISHED) else: self.set_current_state(uid, SourcingState.QUIT) def get_passages(self, uid): return self.user_assignments[uid].passages def add_new_result(self, uid, task_id, example, response=None): example["submission_time"] = datetime.now().isoformat() if uid not in self.user_assignments: return False self.user_assignments[uid].state = SourcingState.WORKING if response is not None: example.update(response) if response is None or response["success"]: self.user_assignments[uid].filled_data[task_id] = example self.user_assignments[uid].success_task_ids.append(task_id) else: self.user_assignments[uid].failed_data.append(example) self.user_assignments[uid].failed_task_ids.append(task_id) self.update_current_state(uid) return True def save_submission(self, uid, task_id, example, response=None): self.add_new_result(uid, task_id, example, response) self.logger.info(f"submission 
for {uid}: {example}") self.user_assignments[uid].save_to(self.save_dir) def update_current_state(self, uid):
>>> >>> # creating 5 unique variables for the following strings >>> for i in range(5): ... print(vpool.id('v{0}'.format(i + 1))) 1 2 11 19 20 In some cases, it makes sense to create an external function for accessing IDPool, e.g.: .. code-block:: python >>> # continuing the previous example >>> var = lambda i: vpool.id('var{0}'.format(i)) >>> var(5) 20 >>> var('hello_world!') 21 """ vid = self.obj2id[obj] if vid not in self.id2obj: self.id2obj[vid] = obj return vid def obj(self, vid): """ The method can be used to map back a given variable identifier to the original object labeled by the identifier. :param vid: variable identifier. :type vid: int :return: an object corresponding to the given identifier. Example: .. code-block:: python >>> vpool.obj(21) 'hello_world!' """ if vid in self.id2obj: return self.id2obj[vid] return None def occupy(self, start, stop): """ Mark a given interval as occupied so that the manager could skip the values from ``start`` to ``stop`` (**inclusive**). :param start: beginning of the interval. :param stop: end of the interval. :type start: int :type stop: int """ self._occupied.append([start, stop]) self._occupied.sort(key=lambda x: x[0]) def _next(self): """ Get next variable ID. Skip occupied intervals if any. """ self.top += 1 while self._occupied and self.top >= self._occupied[0][0]: if self.top <= self._occupied[0][1]: self.top = self._occupied[0][1] + 1 self._occupied.pop(0) return self.top # #============================================================================== class CNF(object): """ Class for manipulating CNF formulas. It can be used for creating formulas, reading them from a file, or writing them to a file. The ``comment_lead`` parameter can be helpful when one needs to parse specific comment lines starting not with character ``c`` but with another character or a string. 
:param from_file: a DIMACS CNF filename to read from :param from_fp: a file pointer to read from :param from_string: a string storing a CNF formula :param from_clauses: a list of clauses to bootstrap the formula with :param from_aiger: an AIGER circuit to bootstrap the formula with :param comment_lead: a list of characters leading comment lines :type from_file: str :type from_fp: file_pointer :type from_string: str :type from_clauses: list(list(int)) :type from_aiger: :class:`aiger.AIG` (see `py-aiger package <https://github.com/mvcisback/py-aiger>`__) :type comment_lead: list(str) """ def __init__(self, from_file=None, from_fp=None, from_string=None, from_clauses=[], from_aiger=None, comment_lead=['c']): """ Constructor. """ self.nv = 0 self.clauses = [] self.comments = [] if from_file: self.from_file(from_file, comment_lead, compressed_with='use_ext') elif from_fp: self.from_fp(from_fp, comment_lead) elif from_string: self.from_string(from_string, comment_lead) elif from_clauses: self.from_clauses(from_clauses) elif from_aiger: self.from_aiger(from_aiger) def from_file(self, fname, comment_lead=['c'], compressed_with='use_ext'): """ Read a CNF formula from a file in the DIMACS format. A file name is expected as an argument. A default argument is ``comment_lead`` for parsing comment lines. A given file can be compressed by either gzip, bzip2, or lzma. :param fname: name of a file to parse. :param comment_lead: a list of characters leading comment lines :param compressed_with: file compression algorithm :type fname: str :type comment_lead: list(str) :type compressed_with: str Note that the ``compressed_with`` parameter can be ``None`` (i.e. the file is uncompressed), ``'gzip'``, ``'bzip2'``, ``'lzma'``, or ``'use_ext'``. The latter value indicates that compression type should be automatically determined based on the file extension. Using ``'lzma'`` in Python 2 requires the ``backports.lzma`` package to be additionally installed. Usage example: .. 
code-block:: python >>> from pysat.formula import CNF >>> cnf1 = CNF() >>> cnf1.from_file('some-file.cnf.gz', compressed_with='gzip') >>> >>> cnf2 = CNF(from_file='another-file.cnf') """ with FileObject(fname, mode='r', compression=compressed_with) as fobj: self.from_fp(fobj.fp, comment_lead) def from_fp(self, file_pointer, comment_lead=['c']): """ Read a CNF formula from a file pointer. A file pointer should be specified as an argument. The only default argument is ``comment_lead``, which can be used for parsing specific comment lines. :param file_pointer: a file pointer to read the formula from. :param comment_lead: a list of characters leading comment lines :type file_pointer: file pointer :type comment_lead: list(str) Usage example: .. code-block:: python >>> with open('some-file.cnf', 'r') as fp: ... cnf1 = CNF() ... cnf1.from_fp(fp) >>> >>> with open('another-file.cnf', 'r') as fp: ... cnf2 = CNF(from_fp=fp) """ self.nv = 0 self.clauses = [] self.comments = [] comment_lead = tuple('p') + tuple(comment_lead) for line in file_pointer: line = line.strip() if line: if line[0] not in comment_lead: cl = [int(l) for l in line.split()[:-1]] self.nv = max([abs(l) for l in cl] + [self.nv]) self.clauses.append(cl) elif not line.startswith('p cnf '): self.comments.append(line) def from_string(self, string, comment_lead=['c']): """ Read a CNF formula from a string. The string should be specified as an argument and should be in the DIMACS CNF format. The only default argument is ``comment_lead``, which can be used for parsing specific comment lines. :param string: a string containing the formula in DIMACS. :param comment_lead: a list of characters leading comment lines :type string: str :type comment_lead: list(str) Example: .. 
code-block:: python >>> from pysat.formula import CNF >>> cnf1 = CNF() >>> cnf1.from_string(='p cnf 2 2\\n-1 2 0\\n1 -2 0') >>> print(cnf1.clauses) [[-1, 2], [1, -2]] >>> >>> cnf2 = CNF(from_string='p cnf 3 3\\n-1 2 0\\n-2 3 0\\n-3 0\\n') >>> print(cnf2.clauses) [[-1, 2], [-2, 3], [-3]] >>> print(cnf2.nv) 3 """ self.from_fp(StringIO(string), comment_lead) def from_clauses(self, clauses): """ This methods copies a list of clauses into a CNF object. :param clauses: a list of clauses :type clauses: list(list(int)) Example: .. code-block:: python >>> from pysat.formula import CNF >>> cnf = CNF(from_clauses=[[-1, 2], [1, -2], [5]]) >>> print(cnf.clauses) [[-1, 2], [1, -2], [5]] >>> print(cnf.nv) 5 """ self.clauses = copy.deepcopy(clauses) for cl in self.clauses: self.nv = max([abs(l) for l in cl] + [self.nv]) def from_aiger(self, aig, vpool=None): """ Create a CNF formula by Tseitin-encoding an input AIGER circuit. Input circuit is expected to be an object of class :class:`aiger.AIG`. Alternatively, it can be specified as an :class:`aiger.BoolExpr`, or an ``*.aag`` filename, or an AIGER string to parse. (Classes :class:`aiger.AIG` and :class:`aiger.BoolExpr` are defined in the `py-aiger package <https://github.com/mvcisback/py-aiger>`__.) :param aig: an input AIGER circuit :param vpool: pool of variable identifiers (optional) :type aig: :class:`aiger.AIG` (see `py-aiger package <https://github.com/mvcisback/py-aiger>`__) :type vpool: :class:`.IDPool` Example: .. 
code-block:: python >>> import aiger >>> x, y, z = aiger.atom('x'), aiger.atom('y'), aiger.atom('z') >>> expr = ~(x | y) & z >>> print(expr.aig) aag 5 3 0 1 2 2 4 8 10 6 3 5 10 6 8 i0 y i1 x i2 z o0 6c454aea-c9e1-11e9-bbe3-3af9d34370a9 >>> >>> from pysat.formula import CNF >>> cnf = CNF(from_aiger=expr.aig) >>> print(cnf.nv) 5 >>> print(cnf.clauses) [[3, 2, 4], [-3, -4], [-2, -4], [-4, -1, 5], [4, -5], [1, -5]] >>> print(['{0} <-> {1}'.format(v, cnf.vpool.obj(v)) for v in cnf.inps]) ['3 <-> y', '2 <-> x', '1 <-> z'] >>> print(['{0} <-> {1}'.format(v, cnf.vpool.obj(v)) for v in cnf.outs]) ['5 <-> 6c454aea-c9e1-11e9-bbe3-3af9d34370a9'] """ assert aiger_present, 'Package \'py-aiger-cnf\' is unavailable. Check your installation.' # creating a pool of variable IDs if necessary self.vpool = vpool if vpool else IDPool() # Use py-aiger-cnf to insulate from internal py-aiger details. aig_cnf = aiger_cnf.aig2cnf(aig, fresh=self.vpool.id, force_true=False) self.clauses = [list(cls) for cls in aig_cnf.clauses] self.comments = ['c ' + c.strip() for c in aig_cnf.comments] self.nv = max(map(abs, itertools.chain(*self.clauses))) # saving input and output variables self.inps = list(aig_cnf.input2lit.values()) self.outs = list(aig_cnf.output2lit.values()) # updating input name to variable mappings for var in self.inps: name = self.vpool.id2obj[var].name self.vpool.obj2id[name] = var self.vpool.id2obj[var] = name # saving the output in the pool by its name for name, lit in aig_cnf.output2lit.items(): self.vpool.obj2id[name] = lit self.vpool.id2obj[lit] = name def copy(self): """ This method can be used for creating a copy of a CNF object. It creates another object of the :class:`CNF` class and makes use of the *deepcopy* functionality to copy the clauses. :return: an object of class :class:`CNF`. Example: .. 
code-block:: python >>> cnf1 = CNF(from_clauses=[[-1, 2], [1]]) >>> cnf2 = cnf1.copy() >>> print(cnf2.clauses) [[-1, 2], [1]] >>> print(cnf2.nv) 2 """ cnf = CNF() cnf.nv = self.nv cnf.clauses = copy.deepcopy(self.clauses) cnf.comments = copy.deepcopy(self.comments) return cnf def to_file(self, fname, comments=None, compress_with='use_ext'): """ The method is for saving a CNF formula into a file in the DIMACS CNF format. A file name is expected as an argument. Additionally, supplementary comment lines can
= [sr_comp.settlements[s].pos[0] for s in ss] # Calculate the distances ds = np.sqrt(np.array([((pos_series[pos][0] - self_pos[0]) ** 2 + (pos_series[pos][1] - self_pos[1]) ** 2) for pos in pos_ids])) * self.model.cellSize xtent_dst = CAgentUtilityFunctions.xtent_distribution(ws, ds, IEComponent.b, IEComponent.m) # Add Other Set of Households to possible parent candidates probabilistically for i in range(len(xtent_dst)): if self.model.random.random() < xtent_dst[i]: # Add all households households.extend([self.model.environment.getAgent(h) for h in sr_comp.settlements[ss[i]].occupants]) # Check to see if all households do not have any resource, if so, randomly choose if len(households) == 0: return self.model.random.choice([self.model.environment.getAgent(h) for h in self.model.environment[ SettlementRelationshipComponent].settlements[sID].occupants if self.model.environment.getAgent(h) != new_household]) # Get status distribution h_status = [h.social_status() for h in households if h.social_status] h_status_total = sum(h_status) h_weighted_distribution = [] for status in h_status: if len(h_weighted_distribution) == 0: h_weighted_distribution.append(status / h_status_total) else: h_weighted_distribution.append(h_weighted_distribution[-1] + status / h_status_total) random_val = self.model.random.random() for i in range(len(h_weighted_distribution)): if h_weighted_distribution[i] < random_val: return households[i] return households[-1] # Default to last agent if value is not lower than any of the agents in the distribution @staticmethod def decode(params: dict): return AgentPopulationSystem(params['id'], params['model'], params['priority'], params['birth_rate'], params['death_rate'], params['yrs_per_move'], params['init_settlements'], params['cell_capacity']) class AgentRBAdaptationSystem(System, IDecodable, ILoggable): # Cumulative Moving Average rainfall_CMA = 0.0 flood_CMA = 0.0 per_severity_index = [[0.7, 0.2, 0.5], [0.4, 0.1, 0.1]] # 1st is RAINFALL and 2nd is 
FLOOD def __init__(self,id: str, model: Model, priority: int): System.__init__(self, id, model, priority=priority) IDecodable.__init__(self) ILoggable.__init__(self, 'model.RBAS') def execute(self): # Update CMAs ge_comp = self.model.environment[GlobalEnvironmentComponent] rf_mean = np.mean(ge_comp.rainfall) AgentRBAdaptationSystem.flood_CMA += (ge_comp.flood - AgentRBAdaptationSystem.flood_CMA) / (self.model.systemManager.timestep + 1) AgentRBAdaptationSystem.rainfall_CMA += (rf_mean - AgentRBAdaptationSystem.rainfall_CMA) / (self.model.systemManager.timestep + 1) for settlement in [self.model.environment[SettlementRelationshipComponent].settlements[s] for s in self.model.environment[SettlementRelationshipComponent].settlements]: # Calculate Learning modifiers per settlement hs = [self.model.environment.getAgent(h) for h in settlement.occupants] h_social_status = [h.social_status() for h in hs] total_wealth = sum(h_social_status) ws = [h / total_wealth if total_wealth != 0 else 0.0 for h in h_social_status] ws_sum = sum(ws) if ws_sum != 0: peer_resource_chances_mean = sum( [hs[i][HouseholdRelationshipComponent].peer_resource_transfer_chance * ws[i] for i in range(len(hs)) ] ) / ws_sum sub_resource_chances_mean = sum( [hs[i][HouseholdRelationshipComponent].sub_resource_transfer_chance * ws[i] for i in range(len(hs)) ] ) / ws_sum for agent in [self.model.environment.getAgent(h) for h in settlement.occupants]: adapt_comp = agent[HouseholdRBAdaptiveComponent] # Update memories with some error margin adapt_comp.update_rainfall_memory(rf_mean + 0.05 * self.model.random.randrange(-1.0, 1.0) * rf_mean) adapt_comp.update_flood_memory(ge_comp.flood + 0.05 * self.model.random.randrange(-1.0, 1.0) * ge_comp.flood) # Need to build memory before making decisions. 
if self.model.systemManager.timestep < HouseholdRBAdaptiveComponent.yrs_to_look_back: return res_comp = agent[ResourceComponent] # Calculate Risk Appraisal deltas = [ abs(sum([adapt_comp.rainfall_memory[x] * HouseholdRBAdaptiveComponent.yr_look_back_weights[x] for x in range(HouseholdRBAdaptiveComponent.yrs_to_look_back)]) + (0.01 * self.model.random.random() ) - AgentRBAdaptationSystem.rainfall_CMA) / AgentRBAdaptationSystem.rainfall_CMA, abs(sum([adapt_comp.flood_memory[x] * HouseholdRBAdaptiveComponent.yr_look_back_weights[x] for x in range(HouseholdRBAdaptiveComponent.yrs_to_look_back)]) + (0.01 * self.model.random.random() ) - AgentRBAdaptationSystem.flood_CMA) / AgentRBAdaptationSystem.flood_CMA ] # If Rainfall delta is less than the flood delta, use the flood delta sev_index = deltas[0] if deltas[0] > deltas[1] else deltas[1] index = 0 if deltas[0] > deltas[1] else 1 # Set severity index if sev_index < 0: sev_index = 0 elif sev_index > 0: sev_index = 2 else: sev_index = 1 # Now we can calculate the severity value severity = deltas[index] * AgentRBAdaptationSystem.per_severity_index[index][sev_index] if severity > 1.0: severity = 1.0 elif severity < 0.0: severity = 0.0 # Calculate risk appraisal risk_appraisal = 0.6 * severity + 0.4 * self.model.random.random() # Calculate Adaptation Appraisal w_age = 1.0 - (0.12 / (0.12 + (min(50.0, res_comp.average_age()) / 50.0) ** 3)) # Here we use household capacity w_hh_size = 1.0 - (0.12 / (0.12 + ( min(ResourceComponent.carrying_capacity, res_comp.able_workers() ) / ResourceComponent.carrying_capacity) ** 3)) adaptation_efficancy = 0.55 * w_age + 0.45 * w_hh_size + (0.2 - 0.3 * self.model.random.random()) w_wealth = 1.0 / (1.0 + math.exp(-3.0 * ((res_comp.resources / res_comp.required_resources()) - 0.5))) self_efficancy = 0.3 * w_wealth + 0.6 * adapt_comp.percentage_to_farm + (0.1 - 0.2 * self.model.random.random()) adaptation_appraisal = 0.5 * (adaptation_efficancy + self_efficancy) if adaptation_appraisal < 0.0: 
adaptation_appraisal = 0.0 elif adaptation_appraisal > 1.0: adaptation_appraisal = 1.0 # Calculate Adaptation Intention # Note: There is no adaptation cost in this model r = HouseholdRBAdaptiveComponent.risk_elasticity * risk_appraisal p = adaptation_appraisal * (1 - HouseholdRBAdaptiveComponent.cognitive_bias) adaptation_intention = p - r adaptation_modifier = 0.0 # Assume Maladaptation # Now determine if successful adaptation vs. maladaptation occurs if adaptation_intention > HouseholdRBAdaptiveComponent.adaptation_intention_threshold: # Adaptation Occurs adaptation_modifier = HouseholdRBAdaptiveComponent.learning_rate self.logger.info('HOUSEHOLD.ADAPTATION.INTENDED: {}'.format(agent.id)) elif self.model.random.random() < 0.01: # Described as Ingenuity Change adaptation_modifier = HouseholdRBAdaptiveComponent.learning_rate * self.model.random.random() self.logger.info('HOUSEHOLD.ADAPTATION.INGENUITY: {}'.format(agent.id)) # Get Experience adaptation_experience_modifier = statistics.mean([ h[HouseholdRBAdaptiveComponent].percentage_to_farm for h in hs ]) * HouseholdRBAdaptiveComponent.learning_rate # Update adaptation value adapt_comp.percentage_to_farm += adaptation_modifier * adaptation_experience_modifier + 0.001 * self.model.random.random() if adapt_comp.percentage_to_farm < 0.0: adapt_comp.percentage_to_farm = 0.0 elif adapt_comp.percentage_to_farm > 1.0: adapt_comp.percentage_to_farm = 1.0 # Update Sharing Preferences if ws_sum != 0: # Do not update anything if the entire settlement has no value peer_trade_modifier = (peer_resource_chances_mean - agent[ HouseholdRelationshipComponent].peer_resource_transfer_chance ) * HouseholdRBAdaptiveComponent.learning_rate sub_trade_modifier = (sub_resource_chances_mean - agent[ HouseholdRelationshipComponent].sub_resource_transfer_chance ) * HouseholdRBAdaptiveComponent.learning_rate agent[HouseholdRelationshipComponent].peer_resource_transfer_chance += peer_trade_modifier 
agent[HouseholdRelationshipComponent].sub_resource_transfer_chance += sub_trade_modifier @staticmethod def decode(params: dict): return AgentRBAdaptationSystem(params['id'], params['model'], params['priority']) class BeliefSpace: def __init__(self, forage_utility, farm_utility, learning_rate, conformity, peer_transfer, sub_transfer): self.forage_utility = forage_utility self.farm_utility = farm_utility self.learning_rate = learning_rate self.conformity = conformity self.peer_transfer = peer_transfer self.sub_transfer = sub_transfer def influence(self, bs, dst_penalty: float): self.forage_utility += (bs.forage_utility - self.forage_utility) * self.conformity * dst_penalty self.farm_utility += (bs.farm_utility - self.farm_utility) * self.conformity * dst_penalty self.learning_rate += (bs.learning_rate - self.learning_rate) * self.conformity * dst_penalty self.peer_transfer += (bs.peer_transfer - self.peer_transfer) * self.conformity * dst_penalty self.sub_transfer += (bs.sub_transfer - self.sub_transfer) * self.conformity * dst_penalty # Do Conformity Last so it doesn't affect the other results. 
self.conformity += (bs.conformity - self.conformity) * self.conformity * dst_penalty def jsonify(self): return { 'forage_utility': self.forage_utility, 'farm_utility': self.farm_utility, 'learning_rate': self.learning_rate, 'conformity': self.conformity, 'peer_transfer': self.peer_transfer, 'sub_transfer': self.sub_transfer } def duplicate(self): return BeliefSpace(self.forage_utility, self.farm_utility, self.learning_rate, self.conformity, self.peer_transfer, self.sub_transfer) class AgentIEAdaptationSystem(System, IDecodable, ILoggable): influence_rate = 0.05 def __init__(self,id: str, model: Model, priority: int): System.__init__(self, id, model, priority=priority) IDecodable.__init__(self) ILoggable.__init__(self, 'model.IEAS') self.belief_spaces = {} def execute(self): belief_spaces = {} sr_comp = self.model.environment[SettlementRelationshipComponent] # Calculate Belief Space for each settlement settlements = [sr_comp.settlements[s] for s in sr_comp.settlements] wealth_dict = {} pos_dict = {} for settlement in settlements: belief_spaces[settlement.id] = sr_comp.create_belief_space(settlement.id) wealth_dict[settlement.id] = sr_comp.getSettlementSocialStatus(settlement.id) pos_dict[settlement.id] = sr_comp.settlements[settlement.id].pos[0] # Influence Each Settlement's belief space using XTENT pos_series = self.model.environment.cells['pos'] influenced_belief_spaces = {} # Influence Happens Simultaneously so we have to duplicate the belief spaces for key in belief_spaces: influenced_belief_spaces[key] = belief_spaces[key].duplicate() for settlement in settlements: ss = [s for s in sr_comp.settlements if s != settlement.id] ws = np.array([wealth_dict[s] for s in ss]) self_pos = pos_series[sr_comp.settlements[settlement.id].pos[0]] pos_ids = [pos_dict[s] for s in ss] # Calculate the distances ds = np.sqrt(np.array([((pos_series[pos][0] - self_pos[0]) ** 2 + (pos_series[pos][1] - self_pos[1]) ** 2) for pos in pos_ids])) * self.model.cellSize xtent_dst = 
CAgentUtilityFunctions.xtent_distribution(ws, ds, IEComponent.b, IEComponent.m) for index in range(len(xtent_dst)): if xtent_dst[index] > 0.0: influenced_belief_spaces[settlement.id].influence(belief_spaces[ss[index]], xtent_dst[index] if xtent_dst[index] < 1.0 else 1.0) # Have to clamp it at 1 belief_spaces = influenced_belief_spaces # Influence Each Household using Updated Settlement belief spaces for agent in self.model.environment.getAgents(): if self.model.random.random() < AgentIEAdaptationSystem.influence_rate: AgentIEAdaptationSystem.influence_agent(agent, belief_spaces[agent[HouseholdRelationshipComponent ].settlementID]) self.logger.info('HOUSEHOLD.INFLUENCE: {}'.format(agent.id)) self.belief_spaces = belief_spaces @staticmethod def influence_agent(agent: IEHousehold, bs: BeliefSpace): conformity = agent[IEComponent].conformity agent[HouseholdPreferenceComponent].forage_utility += ( bs.forage_utility - agent[HouseholdPreferenceComponent ].forage_utility) * conformity agent[HouseholdPreferenceComponent].farm_utility += (bs.farm_utility - agent[HouseholdPreferenceComponent ].farm_utility) * conformity agent[HouseholdPreferenceComponent].learning_rate += (bs.learning_rate - agent[HouseholdPreferenceComponent ].learning_rate) * conformity agent[IEComponent].conformity += (bs.conformity - agent[IEComponent].conformity) * conformity agent[HouseholdRelationshipComponent].peer_resource_transfer_chance += (bs.peer_transfer - agent[ HouseholdRelationshipComponent].peer_resource_transfer_chance) * conformity agent[HouseholdRelationshipComponent].sub_resource_transfer_chance += (bs.sub_transfer - agent[ HouseholdRelationshipComponent].sub_resource_transfer_chance) * conformity @staticmethod def decode(params: dict): AgentIEAdaptationSystem.influence_rate = params['influence_rate'] return AgentIEAdaptationSystem(params['id'], params['model'], params['priority']) # Collectors class AgentCollector(Collector, IDecodable): def __init__(self, id: str, model: Model): 
super().__init__(id, model) @staticmethod def gini_coefficient(x): """Compute Gini coefficient of array of values""" diffsum = 0 for i, xi in enumerate(x[:-1], 1): diffsum += np.sum(np.abs(xi - x[i:])) return diffsum / (len(x) ** 2 * np.mean(x)) def collect(self): agents = self.model.environment.getAgents() self.records.append({}) for agent in agents: self.records[self.model.systemManager.timestep][agent.id] = { 'resources': agent[ResourceComponent].resources, 'population': len(agent[ResourceComponent].occupants), 'satisfaction': agent[ResourceComponent].satisfaction, 'load': agent[HouseholdRelationshipComponent].load } self.records[self.model.systemManager.timestep]['total'] = { 'resources': sum([self.records[self.model.systemManager.timestep][x]['resources'] for x in self.records[self.model.systemManager.timestep]]), 'population': sum([self.records[self.model.systemManager.timestep][x]['population'] for x in self.records[self.model.systemManager.timestep]]), 'satisfaction': sum([self.records[self.model.systemManager.timestep][x]['satisfaction'] for x in self.records[self.model.systemManager.timestep]]), 'load': sum([self.records[self.model.systemManager.timestep][x]['load'] for x in self.records[self.model.systemManager.timestep]]) } @staticmethod def decode(params:
import datetime
# import os, subprocess
import copy
from typing import List, Deque, Dict
from collections import deque


def join(self, DQ2):
    """Right-append every element of *DQ2* onto *self* and return *self*.

    NOTE(review): written in method style (takes ``self``) but defined at
    module level, and not called anywhere in this module — presumably a
    leftover from an earlier class; verify before removing.
    """
    DQ2_Li = list(DQ2)
    for i in DQ2_Li:
        self.append(i)
    return self


def posixTime(self, date, time):
    """Convert ``'YYYY-MM-DD'`` and ``'hh:mm:ss'`` strings to a POSIX timestamp.

    The timestamp is computed for a naive datetime, i.e. in the *local*
    timezone.  The ``self`` parameter is unused; callers pass their own
    instance only because this was once a method.
    """
    date_li = date.split('-')
    time_li = time.split(':')
    DT_args = [int(i) for i in (date_li + time_li)]
    return datetime.datetime(*DT_args).timestamp()


class a_USER(dict):
    """Consumable building block for a_userSESSION instances.

    A single-entry ``{keystr: <str>}`` mapping whose key name is remembered
    in ``self.keystr`` (e.g. ``a_USER(userIP='1.2.3.4')``).
    """

    def __init__(self, **kwds):
        self.keystr = tuple(kwds)[0]
        # Fixed precedence bug: the original ``len(kwds) == 1 & isinstance(...)``
        # parsed as ``len(kwds) == (1 & isinstance(...))`` because ``&`` binds
        # tighter than ``==``.
        if len(kwds) == 1 and isinstance(kwds.get(self.keystr, False), str):
            super().__init__(kwds)
        else:
            print('a_USER requires a single key-val')
            print('pair with <str> value type\n')
            # TODO: raise ValueError here instead of printing.


class a_DATE_TIME(dict):
    """Consumable building block for a_userSESSION instances.

    A ``{date_key: [<str>], time_key: [<str>], 'posixTime': [<float>]}``
    mapping with fixed key strings.  ``append`` grows the date/time lists
    only until two elements exist; afterwards the last element is always
    replaced, so each list holds just the first and most recent date/time
    of the session — intermediate values are of no interest.

    Usage:
        DT = a_DATE_TIME(a_date_key='YYYY-MM-DD', a_time_key='hh:mm:ss')
        DT.append(a_date_key='YYYY-MM-DD', a_time_key='hh:mm:ss')
        DT.append(another_DT_instance)
    """

    def __init__(self, **kwds):
        if len(kwds) >= 2:
            self.posix_keystr = 'posixTime'
            # Keyword order is significant: first key is the date, second
            # the time (guaranteed ordered since Python 3.7).
            self.datekeystr = tuple(kwds)[0]
            self.timekeystr = tuple(kwds)[1]
            date_is_str = isinstance(kwds.get(self.datekeystr, False), str)
            time_is_str = isinstance(kwds.get(self.timekeystr, False), str)
            # ``list`` replaces the deprecated isinstance(..., typing.List).
            posix_is_List = isinstance(kwds.get(self.posix_keystr, False), list)
            if len(kwds) == 2:
                if date_is_str and time_is_str:
                    # Set up the builtin dict, then box each value in a list.
                    super().__init__(**kwds)
                    for key, val in self.items():
                        self[key] = [val]
                    posix_t = posixTime(self, kwds.get(self.datekeystr, False),
                                        kwds.get(self.timekeystr, False))
                    self.update(posixTime=[posix_t])
                else:
                    print('a_date_time requires two key-val')
                    print('pairs with <str> value types.\n')
            elif len(kwds) == 3 and posix_is_List:
                # Re-hydration path: lists (including posixTime) already built.
                super().__init__(**kwds)
            else:
                print('a_date_time requires two key-val')
                print('pairs with <str> value types.\n')
                # TODO: raise ValueError here instead of printing.

    def check_keystrings(self, kwds):
        """Key strings of *self* and the instance being appended must agree."""
        return (self.datekeystr == kwds.datekeystr) and \
               (self.timekeystr == kwds.timekeystr)

    def sub_append(self, datekeystr, timekeystr, date, time):
        """Record one more accession: keep only first and last date/time."""
        # Append while the list still has a single element; afterwards the
        # last element is overwritten so only first/latest are retained.
        if len(self[datekeystr]) <= 1:
            self[datekeystr].append(date)
        else:
            self[datekeystr][-1] = date
        if len(self[timekeystr]) <= 1:
            self[timekeystr].append(time)
        else:
            self[timekeystr][-1] = time
        # NOTE(review): unlike the date/time lists, posixTime grows without
        # bound — presumably intentional; confirm against consumers.
        posix_t = posixTime(self, date, time)
        self[self.posix_keystr].append(posix_t)

    def append(self, *nargs, **kwds):
        """Append another a_DATE_TIME, a plain dict, or direct keyword args."""
        datekeystr = self.datekeystr
        timekeystr = self.timekeystr
        if len(nargs) == 1:
            if isinstance(nargs[0], type(self)):
                kwds = nargs[0]
                if self.check_keystrings(kwds):
                    len_time = len(kwds[timekeystr])
                    for i in range(len_time):
                        date = kwds[datekeystr][i]
                        time = kwds[timekeystr][i]
                        self.sub_append(datekeystr, timekeystr, date, time)
                else:
                    # TODO: raise KeyError on key-string mismatch.
                    pass
            elif isinstance(nargs[0], dict):
                # Handles a direct dict() argument.
                kwds = nargs[0]
                self.append(a_DATE_TIME(**kwds))
            else:
                # TODO: raise TypeError on invalid argument.
                pass
        else:
            # Handles direct keyword args.
            self.append(a_DATE_TIME(**kwds))


class a_FILE(dict):
    """Consumable building block for a_userSESSION instances.

    A ``{file_key: [<str>]}`` mapping with a fixed key string; each list
    element is the concatenation file+ext+CID identifying one accessed
    file.  ``append`` simply extends the list.
    """

    def __init__(self, **kwds):
        self.file_keystr = tuple(kwds)[0]
        self.Inactivity_Period = None
        file_ID_is_List = isinstance(kwds.get(self.file_keystr, False), list)
        if len(tuple(kwds)) == 3:
            ext_keystr = tuple(kwds)[1]
            CID_keystr = tuple(kwds)[2]
            file_is_str = isinstance(kwds.get(self.file_keystr, False), str)
            ext_is_str = isinstance(kwds.get(ext_keystr, False), str)
            CID_is_str = isinstance(kwds.get(CID_keystr, False), str)
            if file_is_str and ext_is_str and CID_is_str:
                # Concatenate file + ext + CID into a single file id and
                # store it as a one-element list under the file key.
                file_ID = ''
                for key, val in kwds.items():
                    file_ID = file_ID + val
                file = {self.file_keystr: [file_ID]}
                super().__init__(**file)
            # NOTE(review): a 3-key call with non-str values fails silently.
        elif len(kwds) == 1 and file_ID_is_List:
            # Re-hydration path: the list is already built.
            super().__init__(**kwds)
        else:
            print('a_File requires three key-val')
            print('pairs (file, ext, and CID) with')
            print('<str> value type.\n')
            # TODO: raise ValueError here instead of printing.

    def check_keystrings(self, kwds):
        """Key strings of *self* and the instance being appended must agree."""
        return self.file_keystr == kwds.file_keystr

    def append(self, *nargs, **kwds):
        """Append another a_FILE, a plain dict, or direct keyword args."""
        file_keystr = self.file_keystr
        if len(nargs) == 1:
            kwds = nargs[0]
            if isinstance(nargs[0], type(self)):
                if self.check_keystrings(kwds):
                    len_files = len(kwds[file_keystr])
                    for i in range(len_files):
                        file = kwds[file_keystr][i]
                        self[file_keystr].append(file)
                else:
                    # TODO: raise KeyError on key-string mismatch.
                    pass
            elif isinstance(nargs[0], dict):
                # Handles a direct dict() argument.
                self.append(a_FILE(**kwds))
            else:
                # TODO: raise TypeError on invalid argument.
                pass
        else:
            # Handles direct keyword args.
            self.append(a_FILE(**kwds))


class a_userSESSION(dict):
    """One user-session record built from the consumable classes a_USER,
    a_DATE_TIME and a_FILE (and possibly others).

    The instance keeps references to its building blocks (``self.a_USER``,
    ``self.a_DATE_TIME``, ``self.a_FILE``) and mirrors their contents into
    itself.  ``append`` merges another accession by the same user into
    this session.  Interaction between the building blocks, if any, is
    coded here.
    """

    # NOTE(review): the ``datetime`` parameter shadows the stdlib module
    # inside these methods; renaming would break keyword callers, so it
    # is kept as-is.
    def __init__(self, user, datetime, file, **kwds):
        super().__init__()
        self.a_USER = user
        self.a_FILE = file
        self.a_DATE_TIME = datetime
        self.update(user)
        self.update(file)
        self.update(datetime)
        self.case = ''
        # Extra keywords (e.g. order=<accession index>) are boxed in lists
        # so later accessions can be appended to them.
        for key, val in kwds.items():
            self[key] = [val]

    def append(self, user, datetime, file, **kwds):
        """Merge another accession by the same user into this session."""
        if self.a_USER[self.a_USER.keystr] == user[self.a_USER.keystr]:
            self.a_DATE_TIME.append(datetime)
            self.a_FILE.append(file)
            self.update(user)
            self.update(file)
            self.update(datetime)
            for key, val in kwds.items():
                self[key].append(val)
        else:
            print('\n userID mismatch...')


class SESSIONS(deque):
    """A deque of a_userSESSION records.

    New and freshly-updated sessions are kept at the left end
    (``SESSIONS[0]``); ``append`` either merges an accession into an
    existing, still-active session of the same user — moving that session
    back to the left end — or inserts the accession at the left end as a
    new session.  (The class docstring of the original spoke of right-
    appending the newest session; the code consistently uses the left end,
    which is what is documented here.)

    Each session should carry an ``'order'`` key: the global accession
    index of the session's first web accession (loosely, the line number
    of the log record it was pulled from).  When the input stream is
    chopped into BLOCKS for scalable processing this index must be
    preserved for later SESSIONS merging.

    Two SESSIONS objects covering non-overlapping time ranges (BLOCKS) may
    later be merged pairwise: iterates merge when their user keys agree
    and the time gap between them does not exceed the inactivity period.
    Sessions beginning or ending within one inactivity period of a BLOCK
    boundary must first be classified by boundary activity (SIA, SII, SAI,
    SAA) so carried-over sessions are handled correctly.  NOTE(review):
    that merge machinery is described but not implemented in this class.
    """

    def __init__(self, *nargs):
        """Initialize from ``(ID_keystr: str, Inactivity_Period: float)``
        or from an existing deque whose elements are a_userSESSION."""
        def error_msg():
            s1 = 'Sessions must be initialized with a string argument representing'
            s2 = 'the key field for unique users. A good example is "userIP"'
            s3 = 'SESSIONS also requires <float> Inactivity_Period.'
            s4 = 'SESSIONS may be initialized with a SESSIONS object of a deque() object '
            s5 = 'containing, at least, elements of a_userSESSION()'
            print('\n{0}\n{1}\n{2}\n{3}\n{4}'.format(s1, s2, s3, s4, s5))

        if len(nargs) == 2:
            is_ID_keystr = isinstance(nargs[0], str)
            is_Inactivity_Period = isinstance(nargs[1], float)
            if is_ID_keystr and is_Inactivity_Period:
                super().__init__()
                self.ID_keystr = nargs[0]
                self.Inactivity_Period = nargs[1]
            else:
                error_msg()
        elif len(nargs) == 1 and isinstance(nargs[0], deque):
            super().__init__(nargs[0])
            # Recover the user key and inactivity period from the first
            # session of the supplied deque.
            self.ID_keystr = tuple(nargs[0][0])[0]
            self.Inactivity_Period = nargs[0][0].a_FILE.Inactivity_Period
        else:
            error_msg()

    def Inactivity_Test(self, S, user_session):
        """Return True when the gap between session *S*'s last accession
        and *user_session*'s last accession exceeds the inactivity period."""
        elapsed = user_session.a_DATE_TIME[user_session.a_DATE_TIME.posix_keystr][-1] - \
            S.a_DATE_TIME[S.a_DATE_TIME.posix_keystr][-1]
        return elapsed > self.Inactivity_Period

    def append(self, user_session):
        """Merge *user_session* into the first matching active session, or
        insert it at the left end as a new session."""
        ID_key = self.ID_keystr
        the_user = user_session.a_USER.get(ID_key, False)
        if the_user:
            if len(self):
                found_key = False
                idx = -1
                for S in self:
                    idx += 1
                    user_test = S.get(ID_key, False) == the_user
                    expired = self.Inactivity_Test(S, user_session)
                    if user_test and not expired and not found_key:
                        found_key = True
                        S.a_DATE_TIME.append(user_session.a_DATE_TIME)
                        S.a_FILE.append(user_session.a_FILE)
                        S.a_FILE.Inactivity_Period = self.Inactivity_Period
                        S['order'].append(user_session['order'][0])
                        if idx > 0:
                            # Move the matched session to the left end:
                            # rotate it into position 0, remove it, undo the
                            # rotation, then re-insert it at the front.
                            super().rotate(-idx)
                            super().popleft()
                            super().rotate(idx)
                            super().appendleft(S)
                        break
                if not found_key:
                    super().appendleft(user_session)
            else:
                super().appendleft(user_session)
        else:
            # Fixed: the original format string had four placeholders but
            # only three arguments and raised IndexError whenever this
            # branch was reached.
            print('\nSESSIONS was initialized with user ID key {0!r}, '
                  'but no such key was found in the appended '
                  'user_session.'.format(ID_key))

    def EOSdump(self):
        """Bubble-sort the deque in place by each session's first 'order'
        value, printing a marker after each swap, and return *self*.

        NOTE(review): each swap via rotate/popleft/appendleft is O(n), so
        this is O(n^3) overall — acceptable only for small deques.
        """
        for i in range(len(self)):
            for j in range(len(self)):
                if self[i]['order'][0] < self[j]['order'][0]:
                    # Swap elements i and j using rotations.
                    self.rotate(-i)
                    i_temp = self.popleft()
                    self.rotate(i - j)
                    j_temp = self.popleft()
                    self.appendleft(i_temp)
                    self.rotate(j - i)
                    self.appendleft(j_temp)
                    self.rotate(i)
                    print(self[j]['order'][0])
        return self
import copy import json import os from unittest.mock import Mock, patch from app.common import client, datasets, path from app.common.test import BaseApiTest class Response(object): def __init__(self, content, status_code=200): self.content = content self.status_code = status_code def json(self): return json.loads(self.content) def raise_for_status(self): raise Exception(self.status_code) def setupResponse(response): ctx_mgr = Mock() ctx_mgr.__enter__ = Mock(return_value=response) ctx_mgr.__exit__ = Mock(return_value=None) return ctx_mgr def setupResponseWithSideEffect(responses): ctx_mgr = Mock() ctx_mgr.__enter__ = Mock(side_effect=responses) ctx_mgr.__exit__ = Mock(return_value=None) return ctx_mgr class TTLHashTest(BaseApiTest): @patch( "time.time", ) def testSameHash(self, time_fct): time_fct.return_value = 1000000 hash1 = client.get_ttl_hash() time_fct.return_value = 1000005 hash2 = client.get_ttl_hash() self.assertEqual(hash1, hash2) @patch( "time.time", ) def testDifferentHashes(self, time_fct): time_fct.return_value = 1000000 hash1 = client.get_ttl_hash() time_fct.return_value = 1000015 hash2 = client.get_ttl_hash() self.assertNotEqual(hash1, hash2) class DatasetListTest(BaseApiTest): DATASETS = [ { "ds_id": 1, "is_raster": True, }, { "ds_id": 2, "is_raster": False, }, ] def setUp(self): super().setUp() os.makedirs(os.path.join(self.wms_cache_dir, "rasters", "1")) @patch("requests.get") def testWithoutFiltering(self, get_mock): with self.flask_app.app_context(): get_mock.return_value = setupResponse( Response(json.dumps(DatasetListTest.DATASETS)) ) datasets = client.get_dataset_list(disable_filtering=True) self.assertEqual(len(get_mock.call_args.args), 1) self.assertEqual(get_mock.call_args.args[0], "dataset_list") self.assertEqual(len(get_mock.call_args.kwargs), 0) self.assertEqual(datasets, DatasetListTest.DATASETS) @patch("requests.get") def testWithFiltering(self, get_mock): with self.flask_app.app_context(): get_mock.return_value = setupResponse( 
Response(json.dumps(DatasetListTest.DATASETS)) ) datasets = client.get_dataset_list() self.assertEqual(len(get_mock.call_args.args), 1) self.assertEqual(get_mock.call_args.args[0], "dataset_list") self.assertEqual(len(get_mock.call_args.kwargs), 0) self.assertEqual(len(datasets), 1) self.assertEqual(datasets[0], DatasetListTest.DATASETS[0]) @patch("requests.get") def testFailure(self, get_mock): with self.flask_app.app_context(): get_mock.return_value = setupResponse(Response(None, 500)) datasets = client.get_dataset_list() self.assertEqual(len(datasets), 0) @patch("requests.get") def testException(self, get_mock): with self.flask_app.app_context(): get_mock.side_effect = Exception() datasets = client.get_dataset_list() self.assertEqual(len(datasets), 0) class ParametersTest(BaseApiTest): PARAMETERS = { "end_at": None, "parameters": { "end_at": None, "fields": [], "levels": [], "is_tiled": False, "start_at": None, "is_raster": False, "variables": [ "var1", ], "time_periods": [], "temporal_granularity": None, }, "default_parameters": {}, } @patch("requests.get") def testSuccess(self, get_mock): with self.flask_app.app_context(): get_mock.return_value = setupResponse( Response(json.dumps(ParametersTest.PARAMETERS)) ) parameters = client.get_parameters(1) self.assertEqual(len(get_mock.call_args.args), 1) self.assertEqual(get_mock.call_args.args[0], "rpc/enermaps_get_parameters") self.assertEqual(len(get_mock.call_args.kwargs), 2) self.assertTrue("headers" in get_mock.call_args.kwargs) self.assertTrue("params" in get_mock.call_args.kwargs) self.assertTrue("Authorization" in get_mock.call_args.kwargs["headers"]) self.assertTrue("id" in get_mock.call_args.kwargs["params"]) self.assertEqual(get_mock.call_args.kwargs["params"]["id"], 1) self.assertEqual(parameters, datasets.convert(ParametersTest.PARAMETERS)) @patch("requests.get") def testFailure(self, get_mock): with self.flask_app.app_context(): get_mock.return_value = setupResponse(Response(None, 500)) parameters = 
client.get_parameters(1) self.assertTrue(parameters is None) @patch("requests.get") def testException(self, get_mock): with self.flask_app.app_context(): get_mock.side_effect = Exception() parameters = client.get_parameters(1) self.assertTrue(parameters is None) class AreasTest(BaseApiTest): def testSuccess(self): with self.flask_app.app_context(): areas = client.get_areas() self.assertEqual(len(areas), 5) for area in areas: self.assertTrue("id" in area) self.assertTrue("title" in area) class RasterFileTest(BaseApiTest): RASTER_CONTENT = b"this is a raster file" @patch("requests.get") def testSuccess(self, get_mock): with self.flask_app.app_context(): get_mock.return_value = setupResponse( Response(RasterFileTest.RASTER_CONTENT) ) content = client.get_raster_file(1, "FID.tif") self.assertEqual(len(get_mock.call_args.args), 1) self.assertEqual(get_mock.call_args.args[0], "1/FID.tif") self.assertEqual(len(get_mock.call_args.kwargs), 1) self.assertTrue("stream" in get_mock.call_args.kwargs) self.assertTrue(get_mock.call_args.kwargs["stream"]) self.assertEqual(content, RasterFileTest.RASTER_CONTENT) @patch("requests.get") def testFailure(self, get_mock): with self.flask_app.app_context(): get_mock.return_value = setupResponse(Response(None, 500)) content = client.get_raster_file(1, "FID.tif") self.assertTrue(content is None) @patch("requests.get") def testException(self, get_mock): with self.flask_app.app_context(): get_mock.side_effect = Exception() content = client.get_raster_file(1, "FID.tif") self.assertTrue(content is None) class GeoJSONTest(BaseApiTest): GEOJSON = { "type": "FeatureCollection", "features": [ { "id": "FEATURE_ID", "type": "Feature", "geometry": { "type": "Point", "coordinates": [7.4, 46.0], }, "properties": { "units": {"var1": "MW", "var2": "kWh", "var3": "kWh"}, "fields": { "field1": "value1", }, "legend": {"symbology": []}, "start_at": None, "variables": { "var1": 1000, "var2": 2000, "var3": None, }, }, }, ], } PARAMETERS = { "end_at": None, 
"parameters": { "end_at": None, "fields": [], "levels": [], "is_tiled": False, "start_at": None, "is_raster": False, "variables": [], "time_periods": [], "temporal_granularity": None, }, "default_parameters": {}, } PARAMETERS_VARIABLE = { "end_at": None, "parameters": { "end_at": None, "fields": [], "levels": [], "is_tiled": False, "start_at": None, "is_raster": False, "variables": [ "var1", ], "time_periods": [], "temporal_granularity": None, }, "default_parameters": {}, } PARAMETERS_DEFAULT_VARIABLE = { "end_at": None, "parameters": { "end_at": None, "fields": [], "levels": [], "is_tiled": False, "start_at": None, "is_raster": False, "variables": [ "var1", ], "time_periods": [], "temporal_granularity": None, }, "default_parameters": {"variable": "var1"}, } PARAMETERS_DEFAULT_VARIABLE2 = { "end_at": None, "parameters": { "end_at": None, "fields": [], "levels": [], "is_tiled": False, "start_at": None, "is_raster": False, "variables": [ "var1", "var2", ], "time_periods": [], "temporal_granularity": None, }, "default_parameters": {"variable": "var2"}, } PARAMETERS_TIME_PERIOD = { "end_at": None, "parameters": { "end_at": None, "fields": [], "levels": [], "is_tiled": False, "start_at": None, "is_raster": False, "variables": [], "time_periods": ["2015"], "temporal_granularity": None, }, "default_parameters": {}, } PARAMETERS_TIME_PERIOD_WITH_MONTH = { "end_at": None, "parameters": { "end_at": None, "fields": [], "levels": [], "is_tiled": False, "start_at": None, "is_raster": False, "variables": [], "time_periods": ["2015-05"], "temporal_granularity": None, }, "default_parameters": {}, } PARAMETERS_NONE_TIME_PERIOD = { "end_at": None, "parameters": { "end_at": None, "fields": [], "levels": [], "is_tiled": False, "start_at": None, "is_raster": False, "variables": [], "time_periods": ["2015", "None"], "temporal_granularity": None, }, "default_parameters": {}, } PARAMETERS_MONTH_TIME_PERIOD = { "end_at": None, "parameters": { "end_at": None, "fields": [], "levels": [], 
"is_tiled": False, "start_at": "2015-01-01 00:00", "is_raster": False, "variables": [], "time_periods": ["07"], "temporal_granularity": None, }, "default_parameters": {}, } PARAMETERS_DEFAULT_TIME_PERIOD = { "end_at": None, "parameters": { "end_at": None, "fields": [], "levels": [], "is_tiled": False, "start_at": None, "is_raster": False, "variables": [], "time_periods": ["2015"], "temporal_granularity": None, }, "default_parameters": {"start_at": "2015-01-01"}, } PARAMETERS_DEFAULT_TIME_PERIOD2 = { "end_at": None, "parameters": { "end_at": None, "fields": [], "levels": [], "is_tiled": False, "start_at": None, "is_raster": False, "variables": [], "time_periods": ["2015", "2016"], "temporal_granularity": None, }, "default_parameters": {"start_at": "2016-01-01"}, } PARAMETERS_DEFAULT_FIELDS = { "end_at": None, "parameters": { "end_at": None, "fields": [], "levels": [], "is_tiled": False, "start_at": None, "is_raster": False, "variables": [], "time_periods": [], "temporal_granularity": None, }, "default_parameters": {"fields": {"field1": "value"}}, } PARAMETERS_DEFAULT_EMPTY_FIELDS = { "end_at": None, "parameters": { "end_at": None, "fields": [], "levels": [], "is_tiled": False, "start_at": None, "is_raster": False, "variables": [], "time_periods": [], "temporal_granularity": None, }, "default_parameters": {"fields": {}}, } PARAMETERS_DEFAULT_LEVEL = { "end_at": None, "parameters": { "end_at": None, "fields": [], "levels": [], "is_tiled": False, "start_at": None, "is_raster": False, "variables": [], "time_periods": [], "temporal_granularity": None, }, "default_parameters": {"level": "{country}"}, } PARAMETERS_DEFAULT_INTERSECTING = { "end_at": None, "parameters": { "end_at": None, "fields": [], "levels": [], "is_tiled": False, "start_at": None, "is_raster": False, "variables": [], "time_periods": [], "temporal_granularity": None, }, "default_parameters": { "intersecting": ( "POLYGON((2.29 48.88,2.29 48.87,2.3 48.87,2.3 48.88,2.29 48.88))" ) }, } def 
checkCallArguments(self, get_mock): self.assertEqual(get_mock.call_count, 1) self.assertEqual(len(get_mock.call_args.args), 1) self.assertEqual(get_mock.call_args.args[0], "rpc/enermaps_query_geojson") self.assertEqual(len(get_mock.call_args.kwargs), 2) self.assertTrue("headers" in get_mock.call_args.kwargs) self.assertTrue("Authorization" in get_mock.call_args.kwargs["headers"]) self.assertTrue("params" in get_mock.call_args.kwargs) self.assertTrue("parameters" in get_mock.call_args.kwargs["params"]) self.assertTrue("row_offset" in get_mock.call_args.kwargs["params"]) self.assertTrue("row_limit" in get_mock.call_args.kwargs["params"]) self.assertEqual(get_mock.call_args.kwargs["params"]["row_offset"], 0) self.assertEqual(get_mock.call_args.kwargs["params"]["row_limit"], 1000) return json.loads(get_mock.call_args.kwargs["params"]["parameters"]) @patch("requests.get") @patch( "app.common.client.get_parameters", new=Mock(return_value=datasets.convert(PARAMETERS)), ) def testSimple(self, get_mock): with self.flask_app.app_context(): get_mock.return_value = setupResponse( Response(json.dumps(GeoJSONTest.GEOJSON)) ) layer_name = path.make_unique_layer_name(path.VECTOR, 1) geojson = client.get_geojson(layer_name) req_parameters = self.checkCallArguments(get_mock) self.assertTrue("data.ds_id" in req_parameters) self.assertEqual(req_parameters["data.ds_id"], 1) self.assertTrue("variable" not in req_parameters) self.assertTrue("start_at" not in req_parameters) self.assertTrue("intersecting" not in req_parameters) self.assertTrue("fields" not in req_parameters) self.assertTrue("level" not in req_parameters) self.assertEqual(geojson, GeoJSONTest.GEOJSON) @patch("requests.get") @patch( "app.common.client.get_parameters", new=Mock(return_value=datasets.convert(PARAMETERS_VARIABLE)), ) def testWithVariable(self, get_mock): with self.flask_app.app_context(): get_mock.return_value = setupResponse( Response(json.dumps(GeoJSONTest.GEOJSON)) ) layer_name = 
path.make_unique_layer_name(path.VECTOR, 1, variable="var1") geojson = client.get_geojson(layer_name) req_parameters = self.checkCallArguments(get_mock) self.assertTrue("data.ds_id" in req_parameters) self.assertEqual(req_parameters["data.ds_id"], 1) self.assertTrue("variable" in req_parameters) self.assertEqual(req_parameters["variable"], "'var1'") self.assertTrue("start_at" not in req_parameters) self.assertTrue("intersecting" not in req_parameters) self.assertTrue("fields" not in req_parameters) self.assertTrue("level" not in req_parameters) self.assertEqual(geojson, GeoJSONTest.GEOJSON) @patch("requests.get") @patch( "app.common.client.get_parameters", new=Mock(return_value=datasets.convert(PARAMETERS_DEFAULT_VARIABLE)), ) def testWithDefaultVariable(self, get_mock): with self.flask_app.app_context(): get_mock.return_value = setupResponse( Response(json.dumps(GeoJSONTest.GEOJSON)) ) layer_name = path.make_unique_layer_name(path.VECTOR, 1) geojson = client.get_geojson(layer_name) req_parameters = self.checkCallArguments(get_mock) self.assertTrue("data.ds_id" in req_parameters) self.assertEqual(req_parameters["data.ds_id"], 1) self.assertTrue("variable" in req_parameters) self.assertEqual(req_parameters["variable"], "'var1'") self.assertTrue("start_at" not in req_parameters) self.assertTrue("intersecting" not in req_parameters) self.assertTrue("fields" not in req_parameters) self.assertTrue("level" not in req_parameters) self.assertEqual(geojson, GeoJSONTest.GEOJSON) @patch("requests.get") @patch( "app.common.client.get_parameters", new=Mock(return_value=datasets.convert(PARAMETERS_DEFAULT_VARIABLE2)), ) def testWithVariableOverridingDefault(self, get_mock): with self.flask_app.app_context(): get_mock.return_value = setupResponse( Response(json.dumps(GeoJSONTest.GEOJSON)) ) layer_name = path.make_unique_layer_name(path.VECTOR, 1, variable="var1") geojson = client.get_geojson(layer_name) req_parameters = self.checkCallArguments(get_mock) 
self.assertTrue("data.ds_id" in req_parameters) self.assertEqual(req_parameters["data.ds_id"], 1) self.assertTrue("variable" in req_parameters) self.assertEqual(req_parameters["variable"], "'var1'") self.assertTrue("start_at" not in req_parameters) self.assertTrue("intersecting" not in req_parameters) self.assertTrue("fields" not in req_parameters) self.assertTrue("level" not in req_parameters) self.assertEqual(geojson, GeoJSONTest.GEOJSON) @patch("requests.get") @patch( "app.common.client.get_parameters", new=Mock(return_value=datasets.convert(PARAMETERS_TIME_PERIOD)), ) def testWithTimePeriod(self, get_mock): with self.flask_app.app_context(): get_mock.return_value = setupResponse( Response(json.dumps(GeoJSONTest.GEOJSON)) ) layer_name = path.make_unique_layer_name(path.VECTOR, 1, time_period="2015") geojson = client.get_geojson(layer_name) req_parameters = self.checkCallArguments(get_mock) self.assertTrue("data.ds_id" in req_parameters) self.assertEqual(req_parameters["data.ds_id"], 1) self.assertTrue("start_at" in req_parameters) self.assertEqual(req_parameters["start_at"], "'2015-01-01'") self.assertTrue("variable" not in req_parameters) self.assertTrue("intersecting" not in req_parameters) self.assertTrue("fields" not in req_parameters) self.assertTrue("level" not in req_parameters) self.assertEqual(geojson, GeoJSONTest.GEOJSON) @patch("requests.get") @patch( "app.common.client.get_parameters", new=Mock(return_value=datasets.convert(PARAMETERS_TIME_PERIOD_WITH_MONTH)), ) def testWithTimePeriodWithMonth(self, get_mock): with self.flask_app.app_context(): get_mock.return_value = setupResponse( Response(json.dumps(GeoJSONTest.GEOJSON)) ) layer_name = path.make_unique_layer_name( path.VECTOR, 1, time_period="2015-05" ) geojson = client.get_geojson(layer_name) req_parameters = self.checkCallArguments(get_mock) self.assertTrue("data.ds_id" in req_parameters) self.assertEqual(req_parameters["data.ds_id"], 1) self.assertTrue("start_at" in req_parameters) 
self.assertEqual(req_parameters["start_at"], "'2015-05-01'") self.assertTrue("variable" not in req_parameters) self.assertTrue("intersecting" not in req_parameters) self.assertTrue("fields" not in req_parameters) self.assertTrue("level" not in req_parameters) self.assertEqual(geojson, GeoJSONTest.GEOJSON) @patch("requests.get") @patch( "app.common.client.get_parameters", new=Mock(return_value=datasets.convert(PARAMETERS_NONE_TIME_PERIOD)), ) def testWithNoneTimePeriod(self, get_mock): with self.flask_app.app_context(): get_mock.return_value = setupResponse( Response(json.dumps(GeoJSONTest.GEOJSON)) ) layer_name = path.make_unique_layer_name(path.VECTOR, 1, time_period="None") geojson = client.get_geojson(layer_name) req_parameters = self.checkCallArguments(get_mock) self.assertTrue("data.ds_id" in req_parameters) self.assertEqual(req_parameters["data.ds_id"], 1) self.assertTrue("start_at" in req_parameters) self.assertTrue(req_parameters["start_at"] is None) self.assertTrue("variable" not in req_parameters) self.assertTrue("intersecting" not in req_parameters) self.assertTrue("fields" not in req_parameters) self.assertTrue("level"
<gh_stars>1-10
# %%
# Case-count plotting script: imports, an edge-selection enum, and the start
# of the doubling-time annotation helper.
import itertools
from datetime import datetime, timezone
from pathlib import Path
from typing import List, Optional, Tuple, Union

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from IPython.display import display  # noqa F401
from matplotlib.dates import DateFormatter, DayLocator
from matplotlib.legend import Legend
from matplotlib.ticker import (
    LogLocator,
    MultipleLocator,
    NullFormatter,
    StrMethodFormatter,
)
from typing_extensions import Literal

from constants import (
    ABCStrictEnum,
    CaseInfo,
    CaseTypes,
    Columns,
    Counting,
    DiseaseStage,
    InfoField,
    Paths,
    Select,
)
from plotting_utils import (
    ADTL_DAY_INDICES,
    COLOR,
    START_DATE,
    LocationColorMapping,
    form_doubling_time_colname,
    get_color_palette_assignments,
    get_current_case_data,
    get_savefile_path_and_location_heading,
    remove_empty_leading_dates,
)

# Non-interactive backend: figures are written to disk, never shown on screen.
matplotlib.use("agg")


class EdgeGuide(ABCStrictEnum):
    """An enum whose cases represent which edge of the graph text is to be
    aligned with
    """

    RIGHT: "EdgeGuide" = "right"
    TOP: "EdgeGuide" = "top"


def _add_doubling_time_lines(
    fig: plt.Figure,
    ax: plt.Axes,
    *,
    stage: DiseaseStage,
    count: Counting,
    x_axis: Columns.XAxis,
):
    """Add doubling time lines to the given plot

    On a log-scale graph, doubling time lines originate from a point near the
    lower-left and show how fast the number of cases (per capita) would grow
    if it doubled every n days.

    :param fig: The figure containing the plots
    :type fig: plt.Figure
    :param ax: The axes object we wish to annotate
    :type ax: plt.Axes
    :param x_axis: The column to be used for the x-axis of the graph.
We only add doubling time lines for graphs plotted against days since outbreak (and not actual days, as doubling time lines don't make sense then because there is no common origin to speak of) :type x_axis: Columns.XAxis :param stage: The disease stage we are plotting :type stage: DiseaseStage :param count: The count method used :type count: Counting """ DiseaseStage.verify(stage) Counting.verify(count) Columns.XAxis.verify(x_axis) # For ease of computation, everything will be in axes coordinate system # Variable names beginning with "ac" refer to axis coords and "dc" to data coords # {ac,dc}_{x,y}_{min,max} refer to the coordinates of the doubling-time lines if x_axis is Columns.XAxis.DAYS_SINCE_OUTBREAK: # Create transformation from data coords to axes coords # This composes two transforms, data -> fig and (axes -> fig)^(-1) dc_to_ac = ax.transData + ax.transAxes.inverted() dc_x_lower_lim, dc_x_upper_lim = ax.get_xlim() dc_y_lower_lim, dc_y_upper_lim = ax.get_ylim() # Adding stuff causes the axis to resize itself, and we have to stop it # from doing so (by setting it back to its original size) ax.set_xlim(dc_x_lower_lim, dc_x_upper_lim) # Also need to add back margin dc_y_upper_lim = dc_to_ac.inverted().transform((0, 1.1))[1] ax.set_ylim(dc_y_lower_lim, dc_y_upper_lim) # Getting min x,y bounds of lines is easy dc_x_min = 0 dc_y_min = CaseInfo.get_info_item_for( InfoField.THRESHOLD, stage=stage, count=count ) ac_x_min, ac_y_min = dc_to_ac.transform((dc_x_min, dc_y_min)) # Getting max x,y bounds is trickier due to needing to use the maximum # extent of the graph area # Get top right corner of graph in data coords (to avoid the edges of the # texts' boxes clipping the axes, we move things in just a hair) ac_x_upper_lim = ac_y_upper_lim = 1 doubling_times = [1, 2, 3, 4, 7, 14] # days (x-axis units) for dt in doubling_times: # Simple math: assuming dc_y_max := dc_y_upper_lim, then if # dc_y_max = dc_y_min * 2**((dc_x_max-dc_x_min)/dt), # then... 
dc_x_max = dc_x_min + dt * np.log2(dc_y_upper_lim / dc_y_min) ac_x_max, ac_y_max = dc_to_ac.transform((dc_x_max, dc_y_upper_lim)) # We try to use ac_y_max=1 by default, and if that leads to too long a line # (sticking out through the right side of the graph) then we use ac_x_max=1 # instead and compute ac_y_max accordingly if ac_x_max > ac_x_upper_lim: dc_y_max = dc_y_min * 2 ** ((dc_x_upper_lim - dc_x_min) / dt) ac_x_max, ac_y_max = dc_to_ac.transform((dc_x_upper_lim, dc_y_max)) edge = EdgeGuide.RIGHT else: edge = EdgeGuide.TOP # Plot the lines themselves ax.plot( [ac_x_min, ac_x_max], [ac_y_min, ac_y_max], transform=ax.transAxes, color="0.0", alpha=0.7, dashes=(1, 2), linewidth=1, ) # Annotate lines with assocated doubling times # Get text to annotate with n_weeks, weekday = divmod(dt, 7) if weekday == 0: annot_text_str = f"{n_weeks} week" if n_weeks != 1: annot_text_str += "s" else: annot_text_str = f"{dt} day" if dt != 1: annot_text_str += "s" text_props = { "bbox": { "fc": "1.0", "pad": 0, # "edgecolor": "1.0", "alpha": 0.7, "lw": 0, } } # Plot in a temporary location just to get the text box size; we'll move and # rotate later plotted_text = ax.text( 0, 0, annot_text_str, text_props, transform=ax.transAxes ) ac_line_slope = (ac_y_max - ac_y_min) / (ac_x_max - ac_x_min) ac_text_angle_rad = np.arctan(ac_line_slope) sin_ac_angle = np.sin(ac_text_angle_rad) cos_ac_angle = np.cos(ac_text_angle_rad) # Get the unrotated text box bounds ac_text_box = plotted_text.get_window_extent( fig.canvas.get_renderer() ).transformed(ax.transAxes.inverted()) ac_text_width = ac_text_box.x1 - ac_text_box.x0 ac_text_height = ac_text_box.y1 - ac_text_box.y0 # Compute the width and height of the upright rectangle bounding the rotated # text box in axis coordinates # Simple geometry (a decent high school math problem) # We cheat a bit; to create some padding between the rotated text box and # the axes, we can add the padding directly to the width and height of the # upright rectangle 
bounding the rotated text box # This works because the origin of the rotated text box is in the lower left # corner of the upright bounding rectangle, so anything added to these # dimensions gets added to the top and right, pushing it away from the axes # and producing the padding we want # If we wanted to do this the "right" way we'd *redo* the calculations above # but with ac_x_upper_lim = ac_y_upper_lim = 1 - padding PADDING = 0.005 ac_rot_text_width = ( (ac_text_width * cos_ac_angle) + (ac_text_height * sin_ac_angle) + PADDING ) ac_rot_text_height = ( (ac_text_width * sin_ac_angle) + (ac_text_height * cos_ac_angle) + PADDING ) # Perpendicular distance from text to corresponding line AC_DIST_FROM_LINE = 0.005 # Get text box origin relative to line upper endpoint EdgeGuide.verify(edge) if edge is EdgeGuide.RIGHT: # Account for bit of overhang; when slanted, top left corner of the # text box extends left of the bottom left corner, which is its origin # Subtracting that bit of overhang (height * sin(theta)) gets us the # x-origin # This only applies to the x coord; the bottom left corner of the text # box is also the bottom of the rotated rectangle ac_text_origin_x = ac_x_max - ( ac_rot_text_width - ac_text_height * sin_ac_angle ) ac_text_origin_y = ( ac_y_min + (ac_text_origin_x - ac_x_min) * ac_line_slope + AC_DIST_FROM_LINE / cos_ac_angle ) # If text box is in very top right of graph, it may use only the right # edge of the graph as a guide and hence clip through the top; if that # happens, it's effectively the same situation as using the top edge from # the start if ( edge is EdgeGuide.TOP # Must go first to short-circuit or ac_text_origin_y + ac_rot_text_height > ac_y_upper_lim ): ac_text_origin_y = ac_y_upper_lim - ac_rot_text_height ac_text_origin_x = ( ac_x_min - AC_DIST_FROM_LINE / sin_ac_angle + (ac_text_origin_y - ac_y_min) / ac_line_slope ) # set_x and set_y work in axis coordinates plotted_text.set_x(ac_text_origin_x) 
plotted_text.set_y(ac_text_origin_y) plotted_text.set_horizontalalignment("left") plotted_text.set_verticalalignment("bottom") plotted_text.set_rotation(ac_text_angle_rad * 180 / np.pi) # takes degrees plotted_text.set_rotation_mode("anchor") def _format_legend( *, ax: plt.Axes, x_axis: Columns.XAxis, count: Counting, location_heading: str, current_case_counts: pd.DataFrame, ) -> Legend: """Format the legend correctly so that relevant info is shown in the legends' rows In addition to just displaying which location maps to a given line color, and which case type to a given line style, we also display some current data in the legend (e.g., this location currently has this many cases). :param ax: The axis to add the legend to :type ax: plt.Axes :param x_axis: The x axis column we're plotting against :type x_axis: Columns.XAxis :param count: Which count we're using :type count: Counting :param location_heading: The name we'll use for the location heading ("Country", "State", etc.) :type location_heading: str :param current_case_counts: Dataframe with current case counts; used to add data to the legend :type current_case_counts: pd.DataFrame :return: The added legend :rtype: Legend """ Counting.verify(count) Columns.XAxis.verify(x_axis) include_confirmed = x_axis is Columns.XAxis.DATE include_deaths = x_axis is Columns.XAxis.DATE include_doubling_time = x_axis is Columns.XAxis.DAYS_SINCE_OUTBREAK include_mortality = x_axis is Columns.XAxis.DATE and count is Counting.TOTAL_CASES include_start_date = (not include_mortality) and ( x_axis is Columns.XAxis.DAYS_SINCE_OUTBREAK ) # Add (formatted)
parser: GraphiteParser, time_range: TimeRange, cache_raw: bool, sparseness_disabled: bool) -> \ Dict[str, TimeSeriesGroup]: """ :param parser: Single graphite target :param time_range: TimeRange over which the query should be executed :return: """ if parser.trace: logger.info('[Trace: %s]: GraphiteApi.execute: %s', parser.query_id, time_range) # recursively execute the methods result: List[List[TimeSeries]] = self._execute_args( schema, parser.args, time_range, cache_raw, sparseness_disabled, parser.repopulate_cache, parser.trace, parser.lookback_sec, parser.optimize_lookback, parser.query_id) if len(result) <= 0: return dict() if len(result) > 1: raise ValueError('Unexpected state: expected only one ' 'time series list. Found: {}'.format(len(result))) tsg_result: Dict[str, TimeSeriesGroup] = dict() tsg_result['_'] = TimeSeriesGroup({'_': result[0]}) return tsg_result # pylint: disable-msg=R0913 # Too many arguments def _execute_args(self, schema: Schema, args, time_range: TimeRange, cache_raw: bool, sparseness_disabled: bool, repopulate_cache: bool, trace: bool, lookback_sec: int, optimize_lookback: int, query_id: str) -> List: """ :param args: :param time_range :param trace :param query_id :return: """ arg_results = list() for arg in args: if 'func' in arg: arg_results.append(self._execute_func( schema, arg, time_range, cache_raw, sparseness_disabled, repopulate_cache, trace, lookback_sec, optimize_lookback, query_id)) elif 'field' in arg: arg_results.append(self._execute_field( schema, arg, time_range, cache_raw, sparseness_disabled, repopulate_cache, trace, lookback_sec, optimize_lookback, query_id)) elif 'literal' in arg: arg_results.append(arg['literal']) else: raise ValueError('Unsupported argument: ' + json.dumps(arg)) return arg_results # pylint: disable-msg=R0913 # Too many arguments def _execute_field(self, schema: Schema, field_exp: dict, time_range: TimeRange, cache_raw: bool, sparseness_disabled: bool, repopulate_cache: bool, trace: bool, 
lookback_sec: int, optimize_lookback: int, query_id: str) -> List[TimeSeries]: """ Fetch "field" (ie. sparse series) from Druid and convert to dense form. If cache_raw is True, we will be caching the output from DruidReader. So first we will check if any cached results exist, and fetch only the missing data. :param field_exp: :param time_range: :param cache_raw: :param sparseness_disabled: :param repopulate_cache: :param trace: :param query_id: :return: """ if not cache_raw: # Raw results are not cached. Fetch data tsgr = self._fetch_series_from_druid(schema, field_exp, time_range, sparseness_disabled, trace, lookback_sec, optimize_lookback, query_id) else: # Raw results are being cached. Check cache cache_key = self._cache_key(schema, field_exp[FIELD], time_range) query_cache_result = None if repopulate_cache: # User has requested clearing of any previously existing cache # entries for the query, and asked us to repopulate with a # fresh set of results QueryCache.inst.clear(cache_key) else: # Fetch from cache cache_entries = QueryCache.inst.get(cache_key, self.user) if cache_entries is not None: query_cache_result = QueryCache.inst.prune( cache_entries, time_range, user=self.user) if query_cache_result is None: # Total miss. 
Fetch data and update cache tsgr = self._fetch_series_from_druid(schema, field_exp, time_range, sparseness_disabled, trace, lookback_sec, optimize_lookback, query_id) new_entry = QueryCacheEntry(time_range, {'_': tsgr}) QueryCache.inst.set(cache_key, [new_entry], self.user, trace, query_id) elif not query_cache_result.missed_ranges: # Total hit tsgr = query_cache_result.cached_results['_'] else: # We have a partial hit, fetch the missing ranges and merge for missing_range in query_cache_result.missed_ranges: tsgr = self._fetch_series_from_druid(schema, field_exp, missing_range, sparseness_disabled, trace, lookback_sec, optimize_lookback, query_id) new_entry = QueryCacheEntry(missing_range, {'_': tsgr}) cache_entries = QueryCache.inst.merge(cache_entries, new_entry, self.user) # We prune the merged cache entries to the required time range query_cache_result: QueryCacheResult = \ QueryCache.inst.prune(cache_entries, time_range, user=self.user) # We also update the backend with the merged cache entries QueryCache.inst.set(cache_key, cache_entries, self.user, trace=trace, trace_id=query_id) tsgr = query_cache_result.cached_results['_'] if len(tsgr.groups) <= 0: return [] # we do not do any group by operation in druid_reader # group by is based on the graphite functions so we just # pass the default list of all series matched by the field pattern tsl: List[TimeSeries] = tsgr.groups['_'] return tsl # pylint: disable-msg=R0913 # Too many arguments def _fetch_series_from_druid(self, schema: Schema, field_exp: dict, time_range: TimeRange, sparseness_disabled: bool, trace: bool, lookback_sec: int, optimize_lookback: int, query_id: str) -> TimeSeriesGroup: """ Fetch sparse series from Druid and convert to dense form. 
:param field_exp: :param time_range: :param sparseness_disabled: :param trace: :param query_id: :return: """ measurement_prefix, field, cluster_id, filters = \ GraphiteApi._convert_flat_field_to_filters(field_exp[FIELD]) tags = dict() SqlParser.extract_tag_conditions_from_filters(filters, tags) tags[FIELD] = field if trace: logger.info('[Trace: %s] tags=%s cluster_id=%s ' 'measurement_prefix=%s', query_id, tags, cluster_id, measurement_prefix) datasource = DruidReader.inst.get_datasource(schema, measurement_prefix, tags, cluster_id, time_range.interval, protocol='graphite', trace=trace, query_id=query_id) tsgr = DruidReader.inst.get_field(schema, datasource, field, filters, time_range, [], query_id, trace, GraphiteFunctionsProvider.fill_func, GraphiteFunctionsProvider.fill_value, True, sparseness_disabled, lookback_sec, optimize_lookback, user=self.user ) if trace: tsgr.log('[Trace: {}]: get_field({})'.format(query_id, field)) if len(tsgr.groups) > 1: raise ValueError('Expected only default group from DruidReader.' 
                             # (continuation of _fetch_series_from_druid:
                             # finish the sanity-check error message, then
                             # return the single-group result.)
                             'Found: {} groups instead'
                             .format(','.join(tsgr.groups.keys())))
        return tsgr

    # pylint: disable-msg=R0913  # Too many arguments
    def _execute_func(self, schema: Schema, func, time_range: TimeRange,
                      cache_raw: bool, sparseness_disabled: bool,
                      repopulate_cache: bool, trace: bool, lookback_sec: int,
                      optimize_lookback: int,
                      query_id: str) -> List[TimeSeries]:
        """Evaluate one parsed graphite function node.

        :param func: Parsed function node (a dict with 'func' and 'args')
        :param time_range: Time range over which the query is executed
        :param trace: Whether to emit trace logging
        :param query_id: Identifier used to tag trace log lines
        :return: List of TimeSeries produced by the function
        """
        # Recursively evaluate the argument sub-expressions first.
        arg_results = self._execute_args(
            schema, func['args'], time_range, cache_raw, sparseness_disabled,
            repopulate_cache, trace, lookback_sec, optimize_lookback,
            query_id)

        # check if its a graphite function
        func_name = func['func']
        func_impl = getattr(GraphiteFunctionsProvider, func['func'],
                            'unknown')
        if func_impl == 'unknown':
            raise ValueError('Unsupported func: {}'.format(func_name))

        # The first argument must always be time_range
        arg_results.insert(0, time_range)
        if trace:
            logger.info('[Trace: %s] Func %s, arg_results: %s',
                        query_id, func, arg_results)
        tslr = func_impl(*arg_results)
        if trace:
            TimeSeriesGroup.log_tsl(
                '[Trace: {}]: {}'.format(query_id, func), tslr)
        return tslr

    @staticmethod
    def results_cache_invalidating_funcs_exist(args) -> bool:
        """
        Check if target contains any functions that would invalidate the use
        of results cache. We will cache raw data in such cases.

        :param args: Parsed argument list to scan recursively
        :return: True if any function in the tree makes cached results unsafe
        """
        for arg in args:
            if 'func' in arg:
                func_name = arg['func']
                if func_name in ['countSeries', 'countNewSeries', 'highest',
                                 'highestAverage', 'highestCurrent',
                                 'highestMax', 'currentAbove', 'averageAbove',
                                 'maximumAbove', 'nPercentile',
                                 'removeBelowPercentile',
                                 'removeAbovePercentile', 'integral', 'limit',
                                 'summarize', 'movingAverage']:
                    # Merging cached results from two different time ranges
                    # leads to incorrect results if any of these functions
                    # are being used.
                    return True
                if func_name == 'removeEmptySeries':
                    # removeEmptySeries(XXX, xFilesFactor) where xFilesFactor
                    # is not equal to 0.0.
It is not safe to use the query # cache results in this case. # # For example, say we ran removeEmptySeries(X, 0.5) on the # series X which has the following datapoints for the time # range [0, 30]: # [(0, None), (10, None), (20, 100.0), (30, 100.0)] # The series will not be removed because it has atleast # 50% non-NULL datapoints. # # Now if the same query comes in for the time range # [0, 20], the right result is an empty list []. This is # because there are 1/3 non-NULL datapoints in the time # range [0, 20] which is not sufficient to meet the 50% # non-NULL criteria. # # But suppose if we had fetched the old cached result for # the time range [0, 30], and pruned it to the time range # [0, 20], we would end up returning the incorrect result: # (0, None), (10, None), (20, 100.0)] if (len(arg['args']) > 1 and 'literal' in arg['args'][1] and arg['args'][1]['literal'] != 0.0): return True # Some other function. We have to recursively look at its # arguments and confirm that none of them use any functions # which invalidate the cache if GraphiteApi.results_cache_invalidating_funcs_exist( arg['args']): return True return False @staticmethod def _convert_flat_field_to_filters(field: str) \ -> Tuple[str, str, str, Dict]: """ We parse the first three tokens as tags to reduce the impact of LIKE query :param field: :return: """ tokens = field.split('.') filters = {'and': []} for i in range(len(tokens) - 1): if tokens[i].startswith('{') and tokens[i].endswith('}'): # This is a set pattern. Eg. {InfluxDB,Telegraf} # We need to convert this to multiple OR clauses clauses = [] # The separator is ";" . 
GraphiteParser.convert_set_patterns() # will convert {a,b,c} to {a;b;c} words = tokens[i].strip('{').strip('}').split(";") for word in words: clauses.append({'=': [TOKEN_TAG_PREFIX + str(i), {'literal': word}]}) filters['and'].append({'or': clauses}) else: filters['and'].append({'=': [TOKEN_TAG_PREFIX + str(i), {'literal': tokens[i]}]}) # add the dot count tag, to make sure we don't match fields # that share the same prefix filters['and'].append({'=': [TOKEN_COUNT, {'literal': str(len(tokens))}]}) if field.startswith(CLUSTERED_FLAT_METRIC_PREFIX): # Last token is sub-field sub_field = tokens[-1] cluster_id = tokens[1] # We treat the metric after taking out the clusters prefix, cluster # ID and node name to be the measurement for Graphite. Measurement # also includes the last token measurement_prefix = '.'.join(tokens[3:]) else: # Not clusters.* metrics. We treat the whole flat metric as the # measurement prefix, including the last token which gets treated # as the field measurement_prefix = '.'.join(tokens) # Last
# branchmap.py - logic to computes, maintain and stores branchmap for local repo # # Copyright 2005-2007 <NAME> <<EMAIL>> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from __future__ import absolute_import import struct from .node import ( bin, hex, nullid, nullrev, ) from . import ( encoding, error, pycompat, scmutil, util, ) from .utils import ( repoviewutil, stringutil, ) if pycompat.TYPE_CHECKING: from typing import ( Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union, ) assert any( ( Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union, ) ) subsettable = repoviewutil.subsettable calcsize = struct.calcsize pack_into = struct.pack_into unpack_from = struct.unpack_from class BranchMapCache(object): """mapping of filtered views of repo with their branchcache""" def __init__(self): self._per_filter = {} def __getitem__(self, repo): self.updatecache(repo) return self._per_filter[repo.filtername] def updatecache(self, repo): """Update the cache for the given filtered view on a repository""" # This can trigger updates for the caches for subsets of the filtered # view, e.g. when there is no cache for this filtered view or the cache # is stale. cl = repo.changelog filtername = repo.filtername bcache = self._per_filter.get(filtername) if bcache is None or not bcache.validfor(repo): # cache object missing or cache object stale? Read from disk bcache = branchcache.fromfile(repo) revs = [] if bcache is None: # no (fresh) cache available anymore, perhaps we can re-use # the cache for a subset, then extend that to add info on missing # revisions. subsetname = subsettable.get(filtername) if subsetname is not None: subset = repo.filtered(subsetname) bcache = self[subset].copy() extrarevs = subset.changelog.filteredrevs - cl.filteredrevs revs.extend(r for r in extrarevs if r <= bcache.tiprev) else: # nothing to fall back on, start empty. 
# (continuation of BranchMapCache.updatecache: no subset cache could be
# reused, so start from an empty cache and replay the missing revisions.)
                bcache = branchcache()

        revs.extend(cl.revs(start=bcache.tiprev + 1))
        if revs:
            bcache.update(repo, revs)

        assert bcache.validfor(repo), filtername
        self._per_filter[repo.filtername] = bcache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        clrev = cl.rev
        clbranchinfo = cl.branchinfo
        rbheads = []
        closed = set()
        # Flatten the remote branch map into a list of heads, remembering
        # which heads close their branch.
        for bheads in pycompat.itervalues(remotebranchmap):
            rbheads += bheads
            for h in bheads:
                r = clrev(h)
                b, c = clbranchinfo(r)
                if c:
                    closed.add(h)

        if rbheads:
            # The tip of the new cache is the highest revision among the
            # remote heads.
            rtiprev = max((int(clrev(node)) for node in rbheads))
            cache = branchcache(
                remotebranchmap,
                repo[rtiprev].node(),
                rtiprev,
                closednodes=closed,
            )

            # Try to stick it as low as possible
            # filter above served are unlikely to be fetch from a clone
            for candidate in (b'base', b'immutable', b'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    self._per_filter[candidate] = cache
                    cache.write(rview)
                    return

    def clear(self):
        # Drop every per-filter cache entry.
        self._per_filter.clear()


def _unknownnode(node):
    """raises ValueError when branchcache found a node which does not exists"""
    raise ValueError('node %s does not exist' % pycompat.sysstr(hex(node)))


def _branchcachedesc(repo):
    # Human-readable cache description used in debug/log messages.
    if repo.filtername is not None:
        return b'branch cache (%s)' % repo.filtername
    else:
        return b'branch cache'


class branchcache(object):
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.
The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(
        self,
        entries=(),
        tipnode=nullid,
        tiprev=nullrev,
        filteredhash=None,
        closednodes=None,
        hasnode=None,
    ):
        # type: (Union[Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]], bytes, int, Optional[bytes], Optional[Set[bytes]], Optional[Callable[[bytes], bool]]) -> None
        """hasnode is a function which can be used to verify whether changelog
        has a given node or not. If it's not provided, we assume that every node
        we have exists in changelog"""
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        self._entries = dict(entries)
        # whether closed nodes are verified or not
        self._closedverified = False
        # branches for which nodes are verified
        self._verifiedbranches = set()
        self._hasnode = hasnode
        if self._hasnode is None:
            # No verifier supplied: assume every node exists in the changelog.
            self._hasnode = lambda x: True

    def _verifyclosed(self):
        """ verify the closed nodes we have """
        # Verification is lazy and performed at most once per instance.
        if self._closedverified:
            return
        for node in self._closednodes:
            if not self._hasnode(node):
                _unknownnode(node)

        self._closedverified = True

    def _verifybranch(self, branch):
        """ verify head nodes for the given branch.
""" if branch not in self._entries or branch in self._verifiedbranches: return for n in self._entries[branch]: if not self._hasnode(n): _unknownnode(n) self._verifiedbranches.add(branch) def _verifyall(self): """ verifies nodes of all the branches """ needverification = set(self._entries.keys()) - self._verifiedbranches for b in needverification: self._verifybranch(b) def __iter__(self): return iter(self._entries) def __setitem__(self, key, value): self._entries[key] = value def __getitem__(self, key): self._verifybranch(key) return self._entries[key] def __contains__(self, key): self._verifybranch(key) return key in self._entries def iteritems(self): for k, v in pycompat.iteritems(self._entries): self._verifybranch(k) yield k, v items = iteritems def hasbranch(self, label): """ checks whether a branch of this name exists or not """ self._verifybranch(label) return label in self._entries @classmethod def fromfile(cls, repo): f = None try: f = repo.cachevfs(cls._filename(repo)) lineiter = iter(f) cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2) last, lrev = cachekey[:2] last, lrev = bin(last), int(lrev) filteredhash = None hasnode = repo.changelog.hasnode if len(cachekey) > 2: filteredhash = bin(cachekey[2]) bcache = cls( tipnode=last, tiprev=lrev, filteredhash=filteredhash, hasnode=hasnode, ) if not bcache.validfor(repo): # invalidate the cache raise ValueError('tip differs') bcache.load(repo, lineiter) except (IOError, OSError): return None except Exception as inst: if repo.ui.debugflag: msg = b'invalid %s: %s\n' repo.ui.debug( msg % ( _branchcachedesc(repo), pycompat.bytestr( inst ), # pytype: disable=wrong-arg-types ) ) bcache = None finally: if f: f.close() return bcache def load(self, repo, lineiter): """fully loads the branchcache by reading from the file using the line iterator passed""" for line in lineiter: line = line.rstrip(b'\n') if not line: continue node, state, label = line.split(b" ", 2) if state not in b'oc': raise ValueError('invalid branch 
state') label = encoding.tolocal(label.strip()) node = bin(node) self._entries.setdefault(label, []).append(node) if state == b'c': self._closednodes.add(node) @staticmethod def _filename(repo): """name of a branchcache file for a given repo or repoview""" filename = b"branch2" if repo.filtername: filename = b'%s-%s' % (filename, repo.filtername) return filename def validfor(self, repo): """Is the cache content valid regarding a repo - False when cached tipnode is unknown or if we detect a strip. - True when cache is up to date or a subset of current repo.""" try: return (self.tipnode == repo.changelog.node(self.tiprev)) and ( self.filteredhash == scmutil.filteredhash(repo, self.tiprev) ) except IndexError: return False def _branchtip(self, heads): """Return tuple with last open head in heads and false, otherwise return last closed head and true.""" tip = heads[-1] closed = True for h in reversed(heads): if h not in self._closednodes: tip = h closed = False break return tip, closed def branchtip(self, branch): """Return the tipmost open head on branch head, otherwise return the tipmost closed head on branch. 
Raise KeyError for unknown branch.""" return self._branchtip(self[branch])[0] def iteropen(self, nodes): return (n for n in nodes if n not in self._closednodes) def branchheads(self, branch, closed=False): self._verifybranch(branch) heads = self._entries[branch] if not closed: heads = list(self.iteropen(heads)) return heads def iterbranches(self): for bn, heads in pycompat.iteritems(self): yield (bn, heads) + self._branchtip(heads) def iterheads(self): """ returns all the heads """ self._verifyall() return pycompat.itervalues(self._entries) def copy(self): """return an deep copy of the branchcache object""" return type(self)( self._entries, self.tipnode, self.tiprev, self.filteredhash, self._closednodes, ) def write(self, repo): try: f = repo.cachevfs(self._filename(repo), b"w", atomictemp=True) cachekey = [hex(self.tipnode), b'%d' % self.tiprev] if self.filteredhash is not None: cachekey.append(hex(self.filteredhash)) f.write(b" ".join(cachekey) + b'\n') nodecount = 0 for label, nodes in sorted(pycompat.iteritems(self._entries)): label = encoding.fromlocal(label) for node in nodes: nodecount += 1 if node in self._closednodes: state
def get_domain_min(d):
    """ Retrieves the lower bound of an integer or interval variable domain.

    The domain can be either:

     * a single number value,
     * a list of numbers, or tuple of 2 numbers representing an interval.

    This method returns the first number of the domain.

    Args:
        d: Domain
    Returns:
        Domain lower bound
    """
    # Unwrap at most two levels: the outer list of domain elements, then
    # a leading interval tuple inside it.
    for _ in range(2):
        if isinstance(d, (tuple, list)):
            d = d[0]
    return d
def _pretty_print_children(lstart, lend, lexpr, out, mxdepth, mxargs, adsize, indent, curdepth):
    """ Pretty print list of expression

    Recursively prints each element of *lexpr* via ``pretty_print``, one per
    line, framed by the *lstart*/*lend* banners.  When *mxargs* is set and
    the list is longer, only the first and last halves are printed with an
    ``...(+N)...`` marker in between.

    Args:
        lstart:   List start banner
        lend:     List end banner
        lexpr:    List of expression to take children from
        out:      Print output (stream or file name, None for stdout)
        mxdepth:  Max print depth. Default is None (no limit)
        mxargs:   Max number of function or array arguments. Default is None (no limit)
        adsize:   Add size of expressions in nodes
        indent:   Indentation string
        curdepth: Current print depth
    """
    # Write start banner
    out.write(lstart)
    # Children are indented one extra " | " level relative to the parent.
    nindent = indent + " | "
    if adsize:
        out.write(" size:" + str(get_node_count(lexpr)))
    # Write expressions
    nbargs = len(lexpr)
    curdepth += 1
    if (mxargs is None) or (nbargs <= mxargs):
        # Short enough: print every child on its own line.
        for x in lexpr:
            out.write('\n')
            pretty_print(x, out, mxdepth, mxargs, adsize, nindent, curdepth)
    else:
        # Too many children: print first bargs and last eargs around an
        # elision marker showing how many were skipped.
        eargs = mxargs // 2
        bargs = mxargs - eargs
        for x in lexpr[:bargs]:
            out.write('\n')
            pretty_print(x, out, mxdepth, mxargs, adsize, nindent, curdepth)
        out.write('\n')
        out.write(nindent)
        out.write("...(+" + str(nbargs - mxargs) + ")...")
        for x in lexpr[-eargs:]:
            out.write('\n')
            pretty_print(x, out, mxdepth, mxargs, adsize, nindent, curdepth)
    # Write end banner
    # out.write('\n')
    # out.write(indent)
    out.write(lend)
def _domain_contains(d, val):
    """ Check whether a domain contains a given value

    A domain is either a single value, or a list/tuple whose elements are
    single values or 2-element lists/tuples denoting inclusive intervals.

    Args:
        d:   Domain
        val: Value to check
    Returns:
        True if value is in the domain, False otherwise
    """
    # Scalar domain: plain equality.
    if not isinstance(d, (tuple, list)):
        return d == val
    for item in d:
        if isinstance(item, (list, tuple)):
            # Interval element: inclusive bounds check.
            lo, hi = item
            if lo <= val <= hi:
                return True
        elif item == val:
            return True
    return False
    def get_or_create(self, tname, pexpr, kbldr, xbldr):
        """ Get a value from the cache

        Looks the expression up first by the Python object's id (cheap), then
        by its normalized (type, key) value, and only builds a new CPO
        expression when both lookups miss.  New entries are inserted under
        both keys; the oldest entry pair is evicted FIFO once ``max_size``
        is reached.

        Args:
            tname: Object type identifier (string)
            pexpr: Python expression
            kbldr: Cache key builder
            xbldr: CPO model expression builder
        Returns:
            Value corresponding to the pexpr
        Raises:
            CpoException: if the key builder cannot normalize *pexpr*.
        """
        # Build normalized value key (needed to check with previous value in case it has been changed)
        try:
            kval = kbldr(pexpr)
        except TypeError:
            raise CpoException("Impossible to build a CP Optimizer expression from value '{}' of type '{}'".format(to_string(pexpr), type(pexpr)))
        # Search value in cache
        with self.lock:
            # First search from source expression id
            pid = id(pexpr)
            cval = self.obj_dict.get(pid)
            if cval is not None:
                # Verify if same type and if normalized values are identical
                # (the same id may have been reused by a different object, or
                # the object may have been mutated since it was cached).
                if (tname == cval[1]) and (cval[0] == kval):
                    self.nb_found_by_id += 1
                    return cval[2]
            # Search from type and normalized value
            ckey = (tname, kval)
            cval = self.obj_dict.get(ckey)
            if cval is not None:
                self.nb_found_by_value += 1
                return cval[2]
            # Build new model expression
            cexpr = xbldr(kval)
            # Remove older object if max size is reached
            if len(self.key_list) >= self.max_size:
                kl = self.key_list.popleft()
                self.obj_dict.pop(kl[0])  # Remove python object id
                self.obj_dict.pop(kl[1])  # Remove cache expression key
            # Add new expression in the cache.  The tuple keeps a reference to
            # pexpr so the source Python object (and thus its id) stays alive.
            cpval = (kval, tname, cexpr, pexpr)
            self.obj_dict[pid] = cpval
            self.obj_dict[ckey] = cpval
            self.key_list.append((pid, ckey))
            self.nb_create_new += 1
            return cexpr
    def replace_namespaced_limit_range(self, body, namespace, name, **kwargs):
        """
        replace the specified LimitRange
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.replace_namespaced_limit_range(body, namespace, name, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param V1LimitRange body: (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str name: name of the LimitRange (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :return: V1LimitRange
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['body', 'namespace', 'name', 'pretty']
        all_params.append('callback')
        # NOTE: generated-client idiom — locals() is captured as the params
        # dict and validated keyword arguments are merged into it.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespaced_limit_range" % key
                )
            params[key] = val
        del params['kwargs']

        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_limit_range`")
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_limit_range`")
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_limit_range`")

        resource_path = '/api/v1/namespaces/{namespace}/limitranges/{name}'.replace('{format}', 'json')
        method = 'PUT'

        path_params = {}
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        if 'name' in params:
            path_params['name'] = params['name']

        query_params = {}
        if 'pretty' in params:
            query_params['pretty'] = params['pretty']

        header_params = {}

        form_params = {}
        files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']

        # HTTP header `Accept` — dropped entirely when the client selects none.
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = []

        # When a callback is supplied, call_api runs asynchronously and the
        # return value is the request thread rather than a V1LimitRange.
        response = self.api_client.call_api(resource_path, method,
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=files,
                                            response_type='V1LimitRange',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
""" all_params = ['body', 'namespace', 'name', 'pretty'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_namespaced_limit_range" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_limit_range`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_limit_range`") # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_limit_range`") resource_path = '/api/v1/namespaces/{namespace}/limitranges/{name}'.replace('{format}', 'json') method = 'DELETE' path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] if 'name' in params: path_params['name'] = params['name'] query_params = {} if 'pretty' in params: query_params['pretty'] = params['pretty'] header_params = {} form_params = {} files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = [] response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params, body=body_params, post_params=form_params, files=files, response_type='UnversionedStatus', auth_settings=auth_settings, 
callback=params.get('callback')) return response def patch_namespaced_limit_range(self, body, namespace, name, **kwargs): """ partially update the specified LimitRange This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.patch_namespaced_limit_range(body, namespace, name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param UnversionedPatch body: (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str name: name of the LimitRange (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1LimitRange If the method is called asynchronously, returns the request thread. """ all_params = ['body', 'namespace', 'name', 'pretty'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_limit_range" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_limit_range`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_limit_range`") # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_limit_range`") resource_path = '/api/v1/namespaces/{namespace}/limitranges/{name}'.replace('{format}', 'json') method = 'PATCH' 
path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] if 'name' in params: path_params['name'] = params['name'] query_params = {} if 'pretty' in params: query_params['pretty'] = params['pretty'] header_params = {} form_params = {} files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = [] response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params, body=body_params, post_params=form_params, files=files, response_type='V1LimitRange', auth_settings=auth_settings, callback=params.get('callback')) return response def list_namespaced_persistent_volume_claim(self, namespace, **kwargs): """ list or watch objects of kind PersistentVolumeClaim This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_namespaced_persistent_volume_claim(namespace, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to
:meth:`~bgp_peerings` and :meth:`~netlinks` for obtaining other routing element types :rtype: tuple(Routing) """ return gateway_by_type(self, 'ospfv2_area') def add_traffic_handler(self, netlink, netlink_gw=None, network=None): """ Add a traffic handler to a routing node. A traffic handler can be either a static netlink or a multilink traffic handler. If ``network`` is not specified and the interface has multiple IP addresses, the traffic handler will be added to all ipv4 addresses. Add a pre-defined netlink to the route table of interface 0:: engine = Engine('vm') rnode = engine.routing.get(0) rnode.add_traffic_handler(StaticNetlink('mynetlink')) Add a pre-defined netlink only to a specific network on an interface with multiple addresses. Specify a netlink_gw for the netlink:: rnode = engine.routing.get(0) rnode.add_traffic_handler( StaticNetlink('mynetlink'), netlink_gw=[Router('myrtr'), Host('myhost')], network='172.18.1.0/24') :param StaticNetlink,Multilink netlink: netlink element :param list(Element) netlink_gw: list of elements that should be destinations for this netlink. Typically these may be of type host, router, group, server, network or engine. :param str network: if network specified, only add OSPF to this network on interface :raises UpdateElementFailed: failure updating routing :raises ModificationAborted: Change must be made at the interface level :raises ElementNotFound: ospf area not found :return: Status of whether the route table was updated :rtype: bool """ routing_node_gateway = RoutingNodeGateway(netlink, destinations=[] if not netlink_gw else netlink_gw) return self._add_gateway_node('netlink', routing_node_gateway, network) def add_ospf_area(self, ospf_area, ospf_interface_setting=None, network=None, communication_mode='NOT_FORCED', unicast_ref=None): """ Add OSPF Area to this routing node. Communication mode specifies how the interface will interact with the adjacent OSPF environment. 
Please see SMC API documentation for more in depth information on each option. If the interface has multiple networks nested below, all networks will receive the OSPF area by default unless the ``network`` parameter is specified. OSPF cannot be applied to IPv6 networks. Example of adding an area to interface routing node:: area = OSPFArea('area0') #obtain area resource #Set on routing interface 0 interface = engine.routing.get(0) interface.add_ospf_area(area) .. note:: If UNICAST is specified, you must also provide a unicast_ref of element type Host to identify the remote host. If no unicast_ref is provided, this is skipped :param OSPFArea ospf_area: OSPF area instance or href :param OSPFInterfaceSetting ospf_interface_setting: used to override the OSPF settings for this interface (optional) :param str network: if network specified, only add OSPF to this network on interface :param str communication_mode: NOT_FORCED|POINT_TO_POINT|PASSIVE|UNICAST :param Element unicast_ref: Element used as unicast gw (required for UNICAST) :raises ModificationAborted: Change must be made at the interface level :raises UpdateElementFailed: failure updating routing :raises ElementNotFound: ospf area not found :return: Status of whether the route table was updated :rtype: bool """ communication_mode = communication_mode.upper() destinations=[] if not ospf_interface_setting else [ospf_interface_setting] if communication_mode == 'UNICAST' and unicast_ref: destinations.append(unicast_ref) routing_node_gateway = RoutingNodeGateway( ospf_area, communication_mode=communication_mode, destinations=destinations) return self._add_gateway_node('ospfv2_area', routing_node_gateway, network) def add_bgp_peering(self, bgp_peering, external_bgp_peer=None, network=None): """ Add a BGP configuration to this routing interface. If the interface has multiple ip addresses, all networks will receive the BGP peering by default unless the ``network`` parameter is specified. 
Example of adding BGP to an interface by ID:: interface = engine.routing.get(0) interface.add_bgp_peering( BGPPeering('mypeer'), ExternalBGPPeer('neighbor')) :param BGPPeering bgp_peering: BGP Peer element :param ExternalBGPPeer,Engine external_bgp_peer: peer element or href :param str network: if network specified, only add OSPF to this network on interface :raises ModificationAborted: Change must be made at the interface level :raises UpdateElementFailed: failed to add BGP :return: Status of whether the route table was updated :rtype: bool """ destination = [external_bgp_peer] if external_bgp_peer else [] routing_node_gateway = RoutingNodeGateway(bgp_peering, destinations=destination) return self._add_gateway_node('bgp_peering', routing_node_gateway, network) def add_static_route(self, gateway, destination, network=None): """ Add a static route to this route table. Destination can be any element type supported in the routing table such as a Group of network members. Since a static route gateway needs to be on the same network as the interface, provide a value for `network` if an interface has multiple addresses on different networks. :: >>> engine = Engine('ve-1') >>> itf = engine.routing.get(0) >>> itf.add_static_route( gateway=Router('tmprouter'), destination=[Group('routegroup')]) :param Element gateway: gateway for this route (Router, Host) :param Element destination: destination network/s for this route. :type destination: list(Host, Router, ..) :raises ModificationAborted: Change must be made at the interface level :raises UpdateElementFailed: failure to update routing table :return: Status of whether the route table was updated :rtype: bool """ routing_node_gateway = RoutingNodeGateway(gateway, destinations=destination) return self._add_gateway_node('router', routing_node_gateway, network) def add_dynamic_gateway(self, networks): """ A dynamic gateway object creates a router object that is attached to a DHCP interface. 
    def _add_gateway_node(self, gw_type, routing_node_gateway, network=None):
        """
        Add a gateway node to existing routing tree. Gateways are only added
        if they do not already exist. If they do exist, check the
        destinations of the existing gateway and add destinations that are
        not already there.

        A current limitation is that if a gateway doesn't exist and the
        destinations specified do not have IP addresses that are valid,
        they are still added (i.e. IPv4 gateway with IPv6 destination is
        considered invalid).

        :param Routing self: the routing node, should be the interface
            routing node
        :param str gw_type: type of gateway, i.e. netlink, ospfv2_area, etc
        :param RoutingNodeGateway route_node_gateway: gateway element
        :param str network: network to bind to. If none, all networks
        :raises ModificationAborted: when not called at the interface level
        :return: Whether a change was made or not
        :rtype: bool
        """
        if self.level != 'interface':
            raise ModificationAborted('You must make this change from the '
                'interface routing level. Current node: {}'.format(self))
        # Tunnel interfaces nest gateways at the interface level, not under
        # the per-network nodes, so they take a separate code path.
        if self.related_element_type == 'tunnel_interface':
            return self._add_gateway_node_on_tunnel(routing_node_gateway)

        # Find any existing gateways
        routing_node = list(gateway_by_type(self, type=gw_type,
                                            on_network=network))

        # Candidate networks: a single matching network, or every network
        # under this interface when no network filter was given.
        _networks = [netwk for netwk in self if netwk.ip == network] if network is \
            not None else list(self)

        # Routing Node Gateway to add as Element
        gateway_element_type = routing_node_gateway.routing_node_element

        modified = False
        # NOTE(review): the loop variable deliberately reuses the name
        # 'network', shadowing the parameter (which is no longer needed past
        # this point).
        for network in _networks:
            # Short circuit for dynamic interfaces
            if getattr(network, 'dynamic_classid', None):
                network.data.setdefault('routing_node', []).append(
                    routing_node_gateway)
                modified = True
                break

            # Used for comparison to
            this_network_node = network.routing_node_element
            if routing_node and any(netwk for _intf, netwk, gw in routing_node
                                    if netwk.routing_node_element == this_network_node
                                    and gateway_element_type == gw.routing_node_element):
                # A gateway exists on this network: merge in only the
                # destinations that are both new and protocol-compatible.
                for gw in network:
                    if gw.routing_node_element == gateway_element_type:
                        existing_dests = [node.routing_node_element
                                          for node in gw]
                        for destination in routing_node_gateway.destinations:
                            is_valid_destination = False
                            if destination not in existing_dests:
                                dest_ipv4, dest_ipv6 = _which_ip_protocol(destination)
                                # A ':' in the network address marks IPv6.
                                if len(network.ip.split(':')) > 1: # IPv6
                                    if dest_ipv6:
                                        is_valid_destination = True
                                else:
                                    if dest_ipv4:
                                        is_valid_destination = True
                                if is_valid_destination:
                                    gw.data.setdefault('routing_node', []).append(
                                        {'level': 'any',
                                         'href': destination.href,
                                         'name': destination.name})
                                    modified = True
            else:
                # Gateway doesn't exist: add it whole, provided its protocol
                # matches the network's (IPv4 vs IPv6).
                gw_ipv4, gw_ipv6 = _which_ip_protocol(gateway_element_type)
                # ipv4, ipv6 or both
                if len(network.ip.split(':')) > 1:
                    if gw_ipv6:
                        network.data.setdefault('routing_node', []).append(
                            routing_node_gateway)
                        modified = True
                else: # IPv4
                    if gw_ipv4:
                        network.data.setdefault('routing_node', []).append(
                            routing_node_gateway)
                        modified = True

        if modified:
            self.update()
        return modified
# <gh_stars>0  -- dataset artifact from the original scrape, preserved as a comment
"""Tests for delphi_utils.geomap.GeoMapper crosswalk loading and aggregation."""
from delphi_utils.geomap import GeoMapper
import pytest
import pandas as pd
import numpy as np


class TestGeoMapper:
    """Exercises crosswalk loading and geocode conversion/aggregation."""

    # Small FIPS-keyed fixtures; counts/totals chosen so sums are easy to check.
    fips_data = pd.DataFrame(
        {
            "fips": ["01123", "02340", "98633", "18181"],
            "date": [pd.Timestamp("2018-01-01")] * 4,
            "count": [2, 0, 20, 10021],
            "total": [4, 0, 400, 100001],
        }
    )
    fips_data_2 = pd.DataFrame(
        {
            "fips": ["01123", "02340", "02002", "18633", "18181"],
            "date": [pd.Timestamp("2018-01-01")] * 5,
            "count": [2, 1, 20, np.nan, 10021],
            "total": [4, 1, 400, np.nan, 100001],
        }
    )
    fips_data_3 = pd.DataFrame(
        {
            "fips": ["48059", "48253", "48441", "72003", "72005", "10999"],
            "date": [pd.Timestamp("2018-01-01")] * 3 + [pd.Timestamp("2018-01-03")] * 3,
            "count": [1, 2, 3, 4, 8, 5],
            "total": [2, 4, 7, 11, 100, 10],
        }
    )
    fips_data_4 = pd.DataFrame(
        {
            "fips": ["01123", "48253", "72003", "18181"],
            "date": [pd.Timestamp("2018-01-01")] * 4,
            "count": [2, 1, np.nan, 10021],
            "total": [4, 1, np.nan, 100001],
        }
    )
    # Same as fips_data_4 but with integer (non-zero-padded) FIPS codes.
    fips_data_5 = pd.DataFrame(
        {
            "fips": [1123, 48253, 72003, 18181],
            "date": [pd.Timestamp("2018-01-01")] * 4,
            "count": [2, 1, np.nan, 10021],
            "total": [4, 1, np.nan, 100001],
        }
    )
    zip_data = pd.DataFrame(
        {
            "zip": ["45140", "95616", "95618"] * 2,
            "date": [pd.Timestamp("2018-01-01")] * 3 + [pd.Timestamp("2018-01-03")] * 3,
            "count": [99, 345, 456, 100, 344, 442],
        }
    )
    zip_data["total"] = zip_data["count"] * 2
    jan_month = pd.bdate_range("2018-01-01", "2018-02-01")
    mega_data = pd.concat(
        (
            pd.DataFrame(
                {
                    "fips": ["01001"] * len(jan_month),
                    "date": jan_month,
                    "count": np.arange(len(jan_month)),
                    "visits": np.arange(len(jan_month)),
                }
            ),
            pd.DataFrame(
                {
                    "fips": ["01002"] * len(jan_month),
                    "date": jan_month,
                    "count": np.arange(len(jan_month)),
                    "visits": 2 * np.arange(len(jan_month)),
                }
            ),
        )
    )
    mega_data_2 = pd.concat(
        (
            pd.DataFrame(
                {
                    "fips": ["01001"] * len(jan_month),
                    "date": jan_month,
                    "count": np.arange(len(jan_month)),
                    "_thr_col_roll": np.arange(len(jan_month)),
                }
            ),
            pd.DataFrame(
                {
                    "fips": [11001] * len(jan_month),
                    "date": jan_month,
                    "count": np.arange(len(jan_month)),
                    "_thr_col_roll": np.arange(len(jan_month)),
                }
            ),
        )
    )
    jhu_uid_data = pd.DataFrame(
        {
            "jhu_uid": [
                84048315,
                84048137,
                84013299,
                84013299,
                84070002,
                84000013,
                84090002,
            ],
            "date": [pd.Timestamp("2018-01-01")] * 3
            + [pd.Timestamp("2018-01-03")] * 3
            + [pd.Timestamp("2018-01-01")],
            "count": [1, 2, 3, 4, 8, 5, 20],
            "total": [2, 4, 7, 11, 100, 10, 40],
        }
    )
    # jhu_big_data = pd.read_csv("test_dir/small_deaths.csv")

    # Loading tests updated 8/26
    def test_crosswalks(self):
        # These tests ensure that the one-to-many crosswalks have properly
        # normalized weights.
        gmpr = GeoMapper()
        # FIPS -> HRR is allowed to be an incomplete mapping, since only a
        # fraction of a FIPS code can not belong to an HRR.
        cw = gmpr._load_crosswalk(from_code="fips", to_code="hrr")
        assert (
            cw.groupby("fips")["weight"].sum().round(5).ge(0.95).all()
        )  # some weight discrepancy is fine for HRR
        cw = gmpr._load_crosswalk(from_code="fips", to_code="zip")
        assert cw.groupby("fips")["weight"].sum().round(5).eq(1.0).all()
        cw = gmpr._load_crosswalk(from_code="jhu_uid", to_code="fips")
        assert cw.groupby("jhu_uid")["weight"].sum().round(5).eq(1.0).all()
        cw = gmpr._load_crosswalk(from_code="zip", to_code="fips")
        assert cw.groupby("zip")["weight"].sum().round(5).eq(1.0).all()
        # weight discrepancy is fine for MSA, for the same reasons as HRR
        # cw = gmpr.load_crosswalk(from_code="zip", to_code="msa")
        # assert cw.groupby("zip")["weight"].sum().round(5).eq(1.0).all()
        cw = gmpr._load_crosswalk(from_code="zip", to_code="state")
        assert cw.groupby("zip")["weight"].sum().round(5).eq(1.0).all()

    def test_load_zip_fips_table(self):
        gmpr = GeoMapper()
        fips_data = gmpr._load_crosswalk(from_code="zip", to_code="fips")
        assert set(fips_data.columns) == set(["zip", "fips", "weight"])
        assert pd.api.types.is_string_dtype(fips_data.zip)
        assert pd.api.types.is_string_dtype(fips_data.fips)
        assert pd.api.types.is_float_dtype(fips_data.weight)

    def test_load_state_table(self):
        gmpr = GeoMapper()
        state_data = gmpr._load_crosswalk(from_code="state", to_code="state")
        assert tuple(state_data.columns) == ("state_code", "state_id", "state_name")
        assert state_data.shape[0] == 60

    def test_load_fips_msa_table(self):
        gmpr = GeoMapper()
        msa_data = gmpr._load_crosswalk(from_code="fips", to_code="msa")
        assert tuple(msa_data.columns) == ("fips", "msa")

    def test_load_jhu_uid_fips_table(self):
        gmpr = GeoMapper()
        jhu_data = gmpr._load_crosswalk(from_code="jhu_uid", to_code="fips")
        assert (jhu_data.groupby("jhu_uid").sum() == 1).all()[0]

    def test_load_zip_hrr_table(self):
        gmpr = GeoMapper()
        zip_data = gmpr._load_crosswalk(from_code="zip", to_code="hrr")
        assert pd.api.types.is_string_dtype(zip_data["zip"])
        assert pd.api.types.is_string_dtype(zip_data["hrr"])

    def test_convert_fips_to_state_code(self):
        gmpr = GeoMapper()
        new_data = gmpr.convert_fips_to_state_code(self.fips_data)
        assert new_data["state_code"].dtype == "O"
        # State code is the first two characters of the FIPS code.
        assert new_data.loc[1, "state_code"] == new_data.loc[1, "fips"][:2]

    def test_fips_to_state_code(self):
        gmpr = GeoMapper()
        new_data = gmpr.fips_to_state_code(self.fips_data_3)
        assert np.allclose(new_data["count"].sum(), self.fips_data_3["count"].sum())

    def test_convert_state_code_to_state_id(self):
        gmpr = GeoMapper()
        new_data = gmpr.convert_fips_to_state_code(self.fips_data)
        new_data = gmpr.convert_state_code_to_state_id(new_data)
        assert new_data["state_id"].isnull()[2]
        assert new_data["state_id"][3] == "in"
        assert len(pd.unique(new_data["state_id"])) == 4

    def test_fips_to_state_id(self):
        gmpr = GeoMapper()
        new_data = gmpr.fips_to_state_id(self.fips_data_2)
        assert new_data["state_id"][2] == "in"
        assert new_data.shape[0] == 3
        assert new_data["count"].sum() == self.fips_data_2["count"].sum()

    def test_fips_to_msa(self):
        gmpr = GeoMapper()
        new_data = gmpr.fips_to_msa(self.fips_data_3)
        assert new_data.shape[0] == 2
        assert new_data["msa"][0] == "10180"
        new_data = gmpr.fips_to_msa(self.fips_data_3, create_mega=True)
        assert new_data[["count"]].sum()[0] == self.fips_data_3["count"].sum()

    def test_zip_to_fips(self):
        gmpr = GeoMapper()
        new_data = gmpr.zip_to_fips(self.zip_data)
        assert new_data.shape[0] == 10
        assert (
            new_data[["count", "total"]].sum() - self.zip_data[["count", "total"]].sum()
        ).sum() < 1e-3

    def test_megacounty(self):
        gmpr = GeoMapper()
        new_data = gmpr.fips_to_megacounty(self.mega_data, 6, 50)
        assert (
            new_data[["count", "visits"]].sum()
            - self.mega_data[["count", "visits"]].sum()
        ).sum() < 1e-3
        # A threshold column clashing with the internal rolling column name
        # must be rejected.
        with pytest.raises(ValueError):
            new_data = gmpr.megacounty_creation(
                self.mega_data_2, 6, 50, thr_col="_thr_col_roll"
            )
        new_data = gmpr.fips_to_megacounty(
            self.mega_data, 6, 50, count_cols=["count", "visits"]
        )
        assert (
            new_data[["count"]].sum() - self.mega_data[["count"]].sum()
        ).sum() < 1e-3

    def test_zip_to_hrr(self):
        gmpr = GeoMapper()
        new_data = gmpr.zip_to_hrr(self.zip_data)
        assert len(pd.unique(new_data["hrr"])) == 2
        assert np.allclose(
            new_data[["count", "total"]].sum(), self.zip_data[["count", "total"]].sum()
        )

    def test_jhu_uid_to_fips(self):
        gmpr = GeoMapper()
        new_data = gmpr.jhu_uid_to_fips(self.jhu_uid_data)
        # UIDs above 90000 are non-FIPS aggregates and must be remapped away.
        assert not (new_data["fips"].astype(int) > 90000).any()
        assert new_data["total"].sum() == self.jhu_uid_data["total"].sum()

    def test_fips_to_zip(self):
        gmpr = GeoMapper()
        new_data = gmpr.fips_to_zip(self.fips_data_4)
        assert new_data["count"].sum() == self.fips_data_4["count"].sum()

    def test_fips_to_hrr(self):
        gmpr = GeoMapper()
        data = gmpr.convert_fips_to_hrr(self.fips_data_3)
        ind = self.fips_data_3["fips"].isin(data["fips"])
        data = self.fips_data_3[ind]
        new_data = gmpr.fips_to_hrr(self.fips_data_3)
        assert new_data.shape == (2, 4)
        assert new_data["count"].sum() == data["count"].sum()

    def test_zip_to_msa(self):
        gmpr = GeoMapper()
        new_data = gmpr.zip_to_msa(self.zip_data)
        assert new_data["msa"][2] == "46700"
        assert new_data.shape[0] == 6
        assert np.allclose(new_data["count"].sum(), self.zip_data["count"].sum())

    def test_zip_to_state_code(self):
        gmpr = GeoMapper()
        new_data = gmpr.zip_to_state_code(self.zip_data)
        assert new_data.shape[0] == 4
        assert np.allclose(new_data["count"].sum(), self.zip_data["count"].sum())

    def test_zip_to_state_id(self):
        gmpr = GeoMapper()
        new_data = gmpr.zip_to_state_id(self.zip_data)
        assert new_data.shape[0] == 4
        assert np.allclose(new_data["count"].sum(), self.zip_data["count"].sum())

    def test_add_population_column(self):
        gmpr = GeoMapper()
        new_data = gmpr.add_population_column(self.fips_data_3, "fips")
        assert new_data.shape == (5, 5)
        new_data = gmpr.add_population_column(self.zip_data, "zip")
        assert new_data.shape == (6, 5)
        with pytest.raises(ValueError):
            new_data = gmpr.add_population_column(self.zip_data, "hrr")
        new_data = gmpr.add_population_column(self.fips_data_5, "fips")
        assert new_data.shape == (4, 5)

    def test_add_geocode(self):
        gmpr = GeoMapper()
        # fips -> zip
        new_data = gmpr.fips_to_zip(self.fips_data_3)
        new_data2 = gmpr.replace_geocode(self.fips_data_3, "fips", "zip")
        assert new_data.equals(new_data2)
        # fips -> hrr
        new_data = gmpr.fips_to_hrr(self.fips_data_3)
        new_data2 = gmpr.replace_geocode(self.fips_data_3, "fips", "hrr")
        new_data2 = new_data2[new_data.columns]
        assert np.allclose(
            new_data[["count", "total"]].values, new_data2[["count", "total"]].values
        )
        # fips -> msa
        new_data = gmpr.fips_to_msa(self.fips_data_3)
        new_data2 = gmpr.replace_geocode(self.fips_data_3, "fips", "msa")
        new_data2 = new_data2[new_data.columns]
        assert np.allclose(
            new_data[["count", "total"]].values, new_data2[["count", "total"]].values
        )
        # fips -> state_id
        new_data = gmpr.fips_to_state_id(self.fips_data_4)
        new_data2 = gmpr.replace_geocode(self.fips_data_4, "fips", "state_id")
        new_data2 = new_data2[new_data.columns]
        assert np.allclose(
            new_data[["count", "total"]].values, new_data2[["count", "total"]].values
        )
        # fips -> state_code
        new_data = gmpr.fips_to_state_code(self.fips_data_4)
        new_data2 = gmpr.replace_geocode(self.fips_data_4, "fips", "state_code")
        new_data2 = new_data2[new_data.columns]
        assert np.allclose(
            new_data[["count", "total"]].values, new_data2[["count", "total"]].values
        )
        # fips -> state_code (again, mostly to cover the test case of when fips
        # codes aren't all strings)
        new_data = gmpr.fips_to_state_code(self.fips_data_5)
        new_data2 = gmpr.replace_geocode(self.fips_data_5, "fips", "state_code")
        new_data2 = new_data2[new_data.columns]
        assert np.allclose(
            new_data[["count", "total"]].values, new_data2[["count", "total"]].values
        )
        # zip -> fips
        new_data = gmpr.zip_to_fips(self.zip_data)
        new_data2 = gmpr.replace_geocode(self.zip_data, "zip", "fips")
        new_data2 = new_data2[new_data.columns]
        assert new_data.equals(new_data2)
        # zip -> hrr
        new_data = gmpr.zip_to_hrr(self.zip_data)
        new_data2 = gmpr.replace_geocode(self.zip_data, "zip", "hrr")
        new_data2 = new_data2[new_data.columns]
        assert new_data.equals(new_data2)
        # zip -> msa
        new_data = gmpr.zip_to_msa(self.zip_data)
        new_data2 = gmpr.replace_geocode(self.zip_data, "zip", "msa")
        new_data2 = new_data2[new_data.columns]
        assert np.allclose(
            new_data[["count", "total"]].values, new_data2[["count", "total"]].values
        )
        # zip -> state_id
        new_data = gmpr.zip_to_state_id(self.zip_data)
        new_data2 = gmpr.replace_geocode(self.zip_data, "zip", "state_id")
        new_data2 = new_data2[new_data.columns]
        assert np.allclose(
            new_data[["count", "total"]].values, new_data2[["count", "total"]].values
        )
        # zip -> state_code
        new_data = gmpr.zip_to_state_code(self.zip_data)
        new_data2 = gmpr.replace_geocode(self.zip_data, "zip", "state_code")
        new_data2 = new_data2[new_data.columns]
        assert np.allclose(
            new_data[["count", "total"]].values, new_data2[["count", "total"]].values
        )
        # jhu_uid -> fips
        new_data = gmpr.jhu_uid_to_fips(self.jhu_uid_data)
        new_data2 = gmpr.replace_geocode(self.jhu_uid_data, "jhu_uid", "fips")
        new_data2 = new_data2[new_data.columns]
        assert np.allclose(
            new_data[["count", "total"]].values, new_data2[["count", "total"]].values
        )
        # state_code -> hhs
        new_data = gmpr.add_geocode(self.zip_data, "zip", "state_code")
        new_data2 = gmpr.add_geocode(new_data, "state_code", "hhs_region_number")
        assert new_data2["hhs_region_number"].unique().size == 2
        # state_name -> state_id
        new_data = gmpr.replace_geocode(self.zip_data, "zip", "state_name")
        new_data2 = gmpr.add_geocode(new_data, "state_name", "state_id")
        assert new_data2.shape == (4, 5)
        new_data2 = gmpr.replace_geocode(new_data, "state_name", "state_id", new_col="abbr")
        assert "abbr" in new_data2.columns
        # fips -> nation
        new_data = gmpr.replace_geocode(self.fips_data_5, "fips", "nation", new_col="NATION")
        assert new_data.equals(
            pd.DataFrame().from_dict(
                {
                    "date": {0: pd.Timestamp("2018-01-01 00:00:00")},
                    "NATION": {0: "us"},
                    "count": {0: 10024.0},
                    "total": {0: 100006.0},
                }
            )
        )
        # zip -> nation
        new_data = gmpr.replace_geocode(self.zip_data, "zip", "nation")
        assert new_data.equals(
            pd.DataFrame().from_dict(
                {
                    "date": {
                        0: pd.Timestamp("2018-01-01"),
                        1: pd.Timestamp("2018-01-03"),
                    },
                    "nation": {0: "us", 1: "us"},
                    "count": {0: 900, 1: 886},
                    "total": {0: 1800, 1: 1772},
                }
            )
        )
        # hrr -> nation: not a valid chain, so the second call must raise.
        with pytest.raises(ValueError):
            new_data = gmpr.replace_geocode(self.zip_data, "zip", "hrr")
            new_data2 = gmpr.replace_geocode(new_data, "hrr", "nation")
        # fips -> hrr (dropna=True/False check)
        assert not gmpr.add_geocode(self.fips_data_3, "fips", "hrr").isna().any().any()
        assert gmpr.add_geocode(self.fips_data_3, "fips", "hrr", dropna=False).isna().any().any()
        # fips -> zip (date_col=None check)
        new_data = gmpr.replace_geocode(
            self.fips_data_5.drop(columns=["date"]), "fips", "hrr", date_col=None
        )
        # NOTE(review): the closing parentheses of this final assert were
        # truncated in the source chunk and have been restored -- confirm
        # against the original file.
        assert new_data.equals(
            pd.DataFrame().from_dict(
                {
                    'hrr': {0: '1', 1: '183', 2: '184', 3: '382', 4: '7'},
                    'count': {0: 1.772347174163783, 1: 7157.392403522299,
                              2: 2863.607596477701, 3: 1.0, 4: 0.22765282583621685},
                    'total': {0: 3.544694348327566, 1: 71424.64801363471,
                              2: 28576.35198636529, 3: 1.0, 4: 0.4553056516724337}
                }
            )
        )
'field_goal_attempts': 1533, 'free_throw_attempt_rate': 0.575, 'effective_field_goal_percentage': 0.525, 'total_rebounds': 659, 'center_percentage': 0, 'power_forward_percentage': 0, 'nationality': 'United States of America', 'offensive_win_shares': 11.5, 'defensive_rebounds': 564, 'total_rebound_percentage': 12.2, 'assists': 907, 'defensive_box_plus_minus': 1.5, 'shooting_distance': 15.9, 'height': '6-5', 'true_shooting_percentage': 0.613, 'small_forward_percentage': 0, 'minutes_played': 2947, 'on_court_plus_minus': 6.9, 'games_started': 81, 'win_shares_per_48_minutes': 0.245, 'dunks': 21, 'three_pointers_assisted_percentage': 0.317, 'two_point_attempts': 777, 'salary': 26540100, 'offensive_box_plus_minus': 8.7, 'lost_ball_turnovers': 114, 'percentage_shots_two_pointers': 0.507, 'shooting_guard_percentage': 1, 'shots_blocked': 85, 'and_ones': 29, 'three_pointers': 262, 'take_fouls': 2, 'free_throws': 746, 'usage_percentage': 34.2, 'assist_percentage': 50.7, 'three_point_attempts': 756, 'player_id': 'hardeja01', 'percentage_shots_three_pointers': 0.493, 'percentage_field_goals_as_dunks': 0.014, 'offensive_rebounds': 95, 'other_turnovers': 55, 'point_guard_percentage': 98, 'half_court_heaves_made': 0, 'blocks': 38, 'two_pointers_assisted_percentage': 0.095, 'win_shares': 15.0, 'defensive_win_shares': 3.6, 'games_played': 81, 'percentage_zero_to_three_footers': 0.242, 'steals': 121, 'turnovers': 464, 'field_goal_perc_zero_to_three_feet': 0.682, 'shooting_fouls': 80, 'team_abbreviation': 'HOU', 'points': 2356, 'position': 'SG', 'three_point_shot_percentage_from_corner': 0.357}, {'field_goal_perc_ten_to_sixteen_feet': 0.368, 'field_goal_perc_sixteen_foot_plus_two_pointers': 0.381, 'box_plus_minus': 1.0, 'two_point_percentage': 0.424, 'two_pointers': 140, 'net_plus_minus': 3.8, 'personal_fouls': 200, 'three_point_percentage': 0.375, 'player_efficiency_rating': 14.0, 'offensive_rebound_percentage': 3.2, 'percentage_sixteen_foot_plus_two_pointers': 0.168, 
'three_point_attempt_rate': 0.429, 'field_goals': 233, 'free_throw_attempts': 240, 'shooting_fouls_drawn': 113, 'defensive_rebound_percentage': 12.7, 'steal_percentage': 2.4, 'weight': 220, 'field_goal_perc_three_to_ten_feet': 0.308, 'percentage_ten_to_sixteen_footers': 0.033, 'turnover_percentage': 13.4, 'points_generated_by_assists': 299, 'block_percentage': 0.9, 'field_goal_percentage': 0.403, 'percentage_of_three_pointers_from_corner': 0.19, 'free_throw_percentage': 0.808, 'offensive_fouls': 12, 'half_court_heaves': 2, 'percentage_three_to_ten_footers': 0.045, 'passing_turnovers': 40, 'value_over_replacement_player': 1.3, 'blocking_fouls': 0, 'field_goal_attempts': 578, 'free_throw_attempt_rate': 0.415, 'effective_field_goal_percentage': 0.484, 'total_rebounds': 244, 'center_percentage': 0, 'power_forward_percentage': 0, 'nationality': 'United States of America', 'offensive_win_shares': 2.0, 'defensive_rebounds': 197, 'total_rebound_percentage': 8.1, 'assists': 137, 'defensive_box_plus_minus': 0.3, 'shooting_distance': 14.7, 'height': '6-5', 'true_shooting_percentage': 0.551, 'small_forward_percentage': 9, 'minutes_played': 1738, 'on_court_plus_minus': 6.4, 'games_started': 0, 'win_shares_per_48_minutes': 0.124, 'dunks': 25, 'three_pointers_assisted_percentage': 0.925, 'two_point_attempts': 330, 'salary': 4054160, 'offensive_box_plus_minus': 0.7, 'lost_ball_turnovers': 36, 'percentage_shots_two_pointers': 0.571, 'shooting_guard_percentage': 90, 'shots_blocked': 45, 'and_ones': 14, 'three_pointers': 93, 'take_fouls': 0, 'free_throws': 194, 'usage_percentage': 20.4, 'assist_percentage': 12.3, 'three_point_attempts': 248, 'player_id': 'hardeja01', 'percentage_shots_three_pointers': 0.429, 'percentage_field_goals_as_dunks': 0.048, 'offensive_rebounds': 47, 'other_turnovers': 30, 'point_guard_percentage': 1, 'half_court_heaves_made': 0, 'blocks': 20, 'two_pointers_assisted_percentage': 0.379, 'win_shares': 4.5, 'defensive_win_shares': 2.5, 'games_played': 76, 
'percentage_zero_to_three_footers': 0.325, 'steals': 80, 'turnovers': 106, 'field_goal_perc_zero_to_three_feet': 0.468, 'shooting_fouls': 98, 'team_abbreviation': 'HOU', 'points': 753, 'position': 'SG', 'three_point_shot_percentage_from_corner': 0.468}, {'field_goal_perc_ten_to_sixteen_feet': 0.426, 'field_goal_perc_sixteen_foot_plus_two_pointers': 0.402, 'box_plus_minus': 6.7, 'two_point_percentage': 0.494, 'two_pointers': 474, 'net_plus_minus': 7.6, 'personal_fouls': 229, 'three_point_percentage': 0.359, 'player_efficiency_rating': 25.3, 'offensive_rebound_percentage': 2.2, 'percentage_sixteen_foot_plus_two_pointers': 0.12, 'three_point_attempt_rate': 0.406, 'field_goals': 710, 'free_throw_attempts': 837, 'shooting_fouls_drawn': 311, 'defensive_rebound_percentage': 15.6, 'steal_percentage': 2.2, 'weight': 220, 'field_goal_perc_three_to_ten_feet': 0.328, 'percentage_ten_to_sixteen_footers': 0.105, 'turnover_percentage': 15.9, 'points_generated_by_assists': 1458, 'block_percentage': 1.4, 'field_goal_percentage': 0.439, 'percentage_of_three_pointers_from_corner': 0.094, 'free_throw_percentage': 0.86, 'offensive_fouls': 42, 'half_court_heaves': 0, 'percentage_three_to_ten_footers': 0.124, 'passing_turnovers': 169, 'value_over_replacement_player': 6.9, 'blocking_fouls': 4, 'field_goal_attempts': 1617, 'free_throw_attempt_rate': 0.518, 'effective_field_goal_percentage': 0.512, 'total_rebounds': 501, 'center_percentage': 0, 'power_forward_percentage': 1, 'nationality': 'United States of America', 'offensive_win_shares': 10.7, 'defensive_rebounds': 438, 'total_rebound_percentage': 8.8, 'assists': 612, 'defensive_box_plus_minus': -0.4, 'shooting_distance': 14.4, 'height': '6-5', 'true_shooting_percentage': 0.598, 'small_forward_percentage': 21, 'minutes_played': 3125, 'on_court_plus_minus': 1.7, 'games_started': 82, 'win_shares_per_48_minutes': 0.204, 'dunks': 16, 'three_pointers_assisted_percentage': 0.521, 'two_point_attempts': 960, 'salary': 15756438, 
'offensive_box_plus_minus': 7.1, 'lost_ball_turnovers': 149, 'percentage_shots_two_pointers': 0.594, 'shooting_guard_percentage': 77, 'shots_blocked': 95, 'and_ones': 56, 'three_pointers': 236, 'take_fouls': 11, 'free_throws': 720, 'usage_percentage': 32.5, 'assist_percentage': 35.4, 'three_point_attempts': 657, 'player_id': 'hardeja01', 'percentage_shots_three_pointers': 0.406, 'percentage_field_goals_as_dunks': 0.011, 'offensive_rebounds': 63, 'other_turnovers': 56, 'point_guard_percentage': 2, 'half_court_heaves_made': 0, 'blocks': 51, 'two_pointers_assisted_percentage': 0.181, 'win_shares': 13.3, 'defensive_win_shares': 2.6, 'games_played': 82, 'percentage_zero_to_three_footers': 0.246, 'steals': 139, 'turnovers': 374, 'field_goal_perc_zero_to_three_feet': 0.65, 'shooting_fouls': 93, 'team_abbreviation': 'HOU', 'points': 2376, 'position': 'SG', 'three_point_shot_percentage_from_corner': 0.274}, {'field_goal_perc_ten_to_sixteen_feet': 0.4, 'field_goal_perc_sixteen_foot_plus_two_pointers': 0.413, 'box_plus_minus': 4.5, 'two_point_percentage': 0.579, 'two_pointers': 195, 'net_plus_minus': 8.0, 'personal_fouls': 150, 'three_point_percentage': 0.39, 'player_efficiency_rating': 21.1, 'offensive_rebound_percentage': 1.9, 'percentage_sixteen_foot_plus_two_pointers': 0.073, 'three_point_attempt_rate': 0.464, 'field_goals': 309, 'free_throw_attempts': 369, 'shooting_fouls_drawn': 161, 'defensive_rebound_percentage': 12.2, 'steal_percentage': 1.6, 'weight': 220, 'field_goal_perc_three_to_ten_feet': 0.3, 'percentage_ten_to_sixteen_footers': 0.024, 'turnover_percentage': 14.8, 'points_generated_by_assists': 536, 'block_percentage': 0.6, 'field_goal_percentage': 0.491, 'percentage_of_three_pointers_from_corner': 0.113, 'free_throw_percentage': 0.846, 'offensive_fouls': 24, 'half_court_heaves': 1, 'percentage_three_to_ten_footers': 0.111, 'passing_turnovers': 62, 'value_over_replacement_player': 3.2, 'blocking_fouls': 0, 'field_goal_attempts': 629, 'free_throw_attempt_rate': 
0.587, 'effective_field_goal_percentage': 0.582, 'total_rebounds': 252, 'center_percentage': 0, 'power_forward_percentage': 0, 'nationality': 'United States of America', 'offensive_win_shares': 7.5, 'defensive_rebounds': 222, 'total_rebound_percentage': 7.4, 'assists': 229, 'defensive_box_plus_minus': -1.1, 'shooting_distance': 14.3, 'height': '6-5', 'true_shooting_percentage': 0.66, 'small_forward_percentage': 44, 'minutes_played': 1946, 'on_court_plus_minus': 9.9, 'games_started': 2, 'win_shares_per_48_minutes': 0.23, 'dunks': 39, 'three_pointers_assisted_percentage': 0.86, 'two_point_attempts': 337, 'salary': 4604760, 'offensive_box_plus_minus': 5.6, 'lost_ball_turnovers': 39, 'percentage_shots_two_pointers': 0.536, 'shooting_guard_percentage': 55, 'shots_blocked': 50, 'and_ones': 28, 'three_pointers': 114, 'take_fouls': 0, 'free_throws': 312, 'usage_percentage': 21.6, 'assist_percentage': 19.3, 'three_point_attempts': 292, 'player_id': 'hardeja01', 'percentage_shots_three_pointers': 0.464, 'percentage_field_goals_as_dunks': 0.062, 'offensive_rebounds': 30, 'other_turnovers': 36, 'point_guard_percentage': 0, 'half_court_heaves_made': 0, 'blocks': 15, 'two_pointers_assisted_percentage': 0.282, 'win_shares': 9.3, 'defensive_win_shares': 1.8, 'games_played': 62, 'percentage_zero_to_three_footers': 0.328, 'steals': 62, 'turnovers': 137, 'field_goal_perc_zero_to_three_feet': 0.723, 'shooting_fouls': 49, 'team_abbreviation': 'HOU', 'points': 1044, 'position': 'SG', 'three_point_shot_percentage_from_corner': 0.455}, {'field_goal_perc_ten_to_sixteen_feet': 0.318, 'field_goal_perc_sixteen_foot_plus_two_pointers': 0.333, 'box_plus_minus': 5.5, 'two_point_percentage': 0.477, 'two_pointers': 406, 'net_plus_minus': -3.6, 'personal_fouls': 178, 'three_point_percentage': 0.368, 'player_efficiency_rating': 23.0, 'offensive_rebound_percentage': 2.4, 'percentage_sixteen_foot_plus_two_pointers': 0.13, 'three_point_attempt_rate': 0.364, 'field_goals': 585, 'free_throw_attempts': 
792, 'shooting_fouls_drawn': 329, 'defensive_rebound_percentage': 11.9, 'steal_percentage': 2.4, 'weight': 220, 'field_goal_perc_three_to_ten_feet': 0.296, 'percentage_ten_to_sixteen_footers': 0.049, 'turnover_percentage': 14.9, 'points_generated_by_assists': 1098, 'block_percentage': 1.0, 'field_goal_percentage': 0.438, 'percentage_of_three_pointers_from_corner': 0.033, 'free_throw_percentage': 0.851, 'offensive_fouls': 37, 'half_court_heaves': 3, 'percentage_three_to_ten_footers': 0.149, 'passing_turnovers': 143, 'value_over_replacement_player': 5.7, 'blocking_fouls': 0, 'field_goal_attempts': 1337, 'free_throw_attempt_rate': 0.592, 'effective_field_goal_percentage': 0.504, 'total_rebounds': 379, 'center_percentage': 0, 'power_forward_percentage': 0, 'nationality': 'United States of America', 'offensive_win_shares': 9.8, 'defensive_rebounds': 317, 'total_rebound_percentage': 7.2, 'assists': 455, 'defensive_box_plus_minus': 0.1, 'shooting_distance': 13.7, 'height': '6-5', 'true_shooting_percentage': 0.6, 'small_forward_percentage': 5, 'minutes_played': 2985, 'on_court_plus_minus': 2.8, 'games_started': 78, 'win_shares_per_48_minutes': 0.206, 'dunks': 46, 'three_pointers_assisted_percentage': 0.475, 'two_point_attempts': 851, 'salary': 5820417, 'offensive_box_plus_minus': 5.4, 'lost_ball_turnovers': 102, 'percentage_shots_two_pointers': 0.636, 'shooting_guard_percentage': 94, 'shots_blocked': 106, 'and_ones': 65, 'three_pointers': 179, 'take_fouls': 5, 'free_throws': 674, 'usage_percentage': 29.0, 'assist_percentage': 25.7, 'three_point_attempts': 486, 'player_id': 'hardeja01', 'percentage_shots_three_pointers': 0.364, 'percentage_field_goals_as_dunks': 0.035, 'offensive_rebounds': 62, 'other_turnovers': 50, 'point_guard_percentage': 0, 'half_court_heaves_made': 1, 'blocks': 38, 'two_pointers_assisted_percentage': 0.241, 'win_shares': 12.8, 'defensive_win_shares': 3.0, 'games_played': 78, 'percentage_zero_to_three_footers': 0.308, 'steals': 142, 'turnovers': 295, 
'field_goal_perc_zero_to_three_feet': 0.65, 'shooting_fouls': 67, 'team_abbreviation': 'HOU', 'points': 2023, 'position': 'SG', 'three_point_shot_percentage_from_corner': 0.25}, {'field_goal_perc_ten_to_sixteen_feet': 0.437, 'field_goal_perc_sixteen_foot_plus_two_pointers': 0.368, 'box_plus_minus': 10.9, 'two_point_percentage': 0.531, 'two_pointers': 386, 'net_plus_minus': 5.3, 'personal_fouls': 169, 'three_point_percentage': 0.367, 'player_efficiency_rating': 29.8, 'offensive_rebound_percentage': 1.8, 'percentage_sixteen_foot_plus_two_pointers': 0.039, 'three_point_attempt_rate': 0.498, 'field_goals': 651, 'free_throw_attempts': 727, 'shooting_fouls_drawn': 273, 'defensive_rebound_percentage': 15.2, 'steal_percentage': 2.4, 'weight': 220, 'field_goal_perc_three_to_ten_feet': 0.351, 'percentage_ten_to_sixteen_footers': 0.071, 'turnover_percentage': 15.1, 'points_generated_by_assists': 1499, 'block_percentage': 1.7, 'field_goal_percentage': 0.449, 'percentage_of_three_pointers_from_corner': 0.072, 'free_throw_percentage': 0.858, 'offensive_fouls': 29, 'half_court_heaves': 2, 'percentage_three_to_ten_footers': 0.118, 'passing_turnovers': 188, 'value_over_replacement_player': 8.3, 'blocking_fouls': 2, 'field_goal_attempts': 1449, 'free_throw_attempt_rate': 0.502, 'effective_field_goal_percentage': 0.541, 'total_rebounds': 389, 'center_percentage': 0, 'power_forward_percentage': 0, 'nationality': 'United States of America', 'offensive_win_shares': 11.6, 'defensive_rebounds': 348, 'total_rebound_percentage': 8.6, 'assists': 630, 'defensive_box_plus_minus': 1.3, 'shooting_distance': 15.3, 'height': '6-5', 'true_shooting_percentage': 0.619, 'small_forward_percentage': 6, 'minutes_played': 2551, 'on_court_plus_minus': 10.5, 'games_started': 72, 'win_shares_per_48_minutes': 0.289, 'dunks': 19, 'three_pointers_assisted_percentage': 0.26, 'two_point_attempts': 727, 'salary': 28299399, 'offensive_box_plus_minus': 9.6, 'lost_ball_turnovers': 87, 'percentage_shots_two_pointers': 
0.502, 'shooting_guard_percentage': 75, 'shots_blocked': 98, 'and_ones': 55, 'three_pointers': 265, 'take_fouls': 5, 'free_throws': 624, 'usage_percentage': 36.1, 'assist_percentage': 45.1, 'three_point_attempts': 722, 'player_id': 'hardeja01', 'percentage_shots_three_pointers': 0.498, 'percentage_field_goals_as_dunks': 0.013, 'offensive_rebounds': 41, 'other_turnovers': 40, 'point_guard_percentage': 19, 'half_court_heaves_made': 0, 'blocks': 50, 'two_pointers_assisted_percentage': 0.088, 'win_shares': 15.4, 'defensive_win_shares': 3.8, 'games_played': 72, 'percentage_zero_to_three_footers': 0.273, 'steals': 126, 'turnovers': 315, 'field_goal_perc_zero_to_three_feet': 0.658, 'shooting_fouls': 65, 'team_abbreviation': 'HOU', 'points': 2191, 'position': 'SG', 'three_point_shot_percentage_from_corner': 0.327}, {'field_goal_perc_ten_to_sixteen_feet': 0.444, 'field_goal_perc_sixteen_foot_plus_two_pointers': 0.402, 'box_plus_minus': 2.3, 'two_point_percentage': 0.514, 'two_pointers': 185, 'net_plus_minus': 2.7, 'personal_fouls': 207, 'three_point_percentage': 0.349, 'player_efficiency_rating': 16.4, 'offensive_rebound_percentage': 2.3, 'percentage_sixteen_foot_plus_two_pointers': 0.127, 'three_point_attempt_rate': 0.474, 'field_goals': 298, 'free_throw_attempts': 343, 'shooting_fouls_drawn': 152, 'defensive_rebound_percentage': 11.0, 'steal_percentage': 2.2, 'weight': 220, 'field_goal_perc_three_to_ten_feet': 0.28, 'percentage_ten_to_sixteen_footers': 0.026, 'turnover_percentage': 11.3, 'points_generated_by_assists': 419, 'block_percentage': 0.8, 'field_goal_percentage': 0.436, 'percentage_of_three_pointers_from_corner': 0.182, 'free_throw_percentage': 0.843, 'offensive_fouls': 29, 'half_court_heaves': 1, 'percentage_three_to_ten_footers': 0.073, 'passing_turnovers': 29, 'value_over_replacement_player': 2.4, 'blocking_fouls': 0, 'field_goal_attempts': 684, 'free_throw_attempt_rate': 0.501, 'effective_field_goal_percentage': 0.518, 'total_rebounds': 255, 
'center_percentage': 0, 'power_forward_percentage': 1, 'nationality': 'United States of America', 'offensive_win_shares': 5.0, 'defensive_rebounds': 213, 'total_rebound_percentage': 6.8, 'assists': 176, 'defensive_box_plus_minus': -0.6, 'shooting_distance': 15.4, 'height': '6-5', 'true_shooting_percentage': 0.598, 'small_forward_percentage': 26, 'minutes_played': 2189, 'on_court_plus_minus': 5.5, 'games_started': 5, 'win_shares_per_48_minutes': 0.156, 'dunks': 46, 'three_pointers_assisted_percentage': 0.823, 'two_point_attempts': 360, 'salary': 4304520, 'offensive_box_plus_minus': 2.9, 'lost_ball_turnovers': 37, 'percentage_shots_two_pointers': 0.526, 'shooting_guard_percentage': 74, 'shots_blocked': 46, 'and_ones': 27, 'three_pointers': 113, 'take_fouls': 0, 'free_throws': 289, 'usage_percentage': 19.5, 'assist_percentage': 12.8, 'three_point_attempts': 324, 'player_id': 'hardeja01', 'percentage_shots_three_pointers': 0.474, 'percentage_field_goals_as_dunks': 0.07, 'offensive_rebounds': 42, 'other_turnovers': 40, 'point_guard_percentage': 0, 'half_court_heaves_made': 0, 'blocks': 24, 'two_pointers_assisted_percentage': 0.422, 'win_shares': 7.1, 'defensive_win_shares': 2.1, 'games_played': 82, 'percentage_zero_to_three_footers': 0.3, 'steals': 92, 'turnovers': 106, 'field_goal_perc_zero_to_three_feet': 0.624, 'shooting_fouls': 81, 'team_abbreviation': 'HOU', 'points': 998, 'position': 'SG', 'three_point_shot_percentage_from_corner': 0.356} ] indices = ['2013-14', 'Career', '2014-15', '2016-17', '2009-10', '2015-16', '2011-12', '2012-13', '2017-18', '2010-11'] df = pd.DataFrame(dataframe, index=indices) player = self.player('') # Pandas doesn't natively allow comparisons of DataFrames. # Concatenating the two DataFrames (the one generated during the test # and the expected one above) and dropping duplicate rows leaves only # the rows that are unique between the two frames. This allows a quick # check of the DataFrame to see if it is empty -
out.close() pyrax.http.request = sav_req ident.http_log_debug = sav_debug sys.stdout = sav_stdout def test_call_without_slash(self): ident = self.base_identity_class() ident._get_auth_endpoint = Mock() ident._get_auth_endpoint.return_value = "http://example.com/v2.0" ident.verify_ssl = False pyrax.http.request = Mock() ident._call("POST", "tokens", False, {}, {}, False) pyrax.http.request.assert_called_with("POST", "http://example.com/v2.0/tokens", headers={}, raise_exception=False) def test_call_with_slash(self): ident = self.base_identity_class() ident._get_auth_endpoint = Mock() ident._get_auth_endpoint.return_value = "http://example.com/v2.0/" ident.verify_ssl = False pyrax.http.request = Mock() ident._call("POST", "tokens", False, {}, {}, False) pyrax.http.request.assert_called_with("POST", "http://example.com/v2.0/tokens", headers={}, raise_exception=False) def test_list_users(self): ident = self.rax_identity_class() resp = fakes.FakeIdentityResponse() resp.response_type = "users" ident.method_get = Mock(return_value=(resp, resp.json())) ret = ident.list_users() self.assertTrue(isinstance(ret, list)) are_users = [isinstance(itm, pyrax.rax_identity.User) for itm in ret] self.assertTrue(all(are_users)) def test_list_users_alt_body(self): ident = self.rax_identity_class() resp = fakes.FakeIdentityResponse() resp.response_type = "users" alt = fakes.fake_identity_user_response.get("users") alt[0]["password"] = "<PASSWORD>" ident.method_get = Mock(return_value=(resp, alt)) ret = ident.list_users() self.assertTrue(isinstance(ret, list)) are_users = [isinstance(itm, pyrax.rax_identity.User) for itm in ret] self.assertTrue(all(are_users)) def test_list_users_fail(self): ident = self.rax_identity_class() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp.status_code = 401 ident.method_get = Mock(return_value=(resp, resp.json())) self.assertRaises(exc.AuthorizationFailure, ident.list_users) def test_find_user_by_name_rax(self): ident = 
self.rax_identity_class() ident.get_user = Mock() fake_name = utils.random_unicode() ret = ident.find_user_by_name(fake_name) ident.get_user.assert_called_with(username=fake_name) def test_find_user_by_email_rax(self): ident = self.rax_identity_class() ident.get_user = Mock() fake_email = utils.random_unicode() ret = ident.find_user_by_email(fake_email) ident.get_user.assert_called_with(email=fake_email) def test_find_user_by_id_rax(self): ident = self.rax_identity_class() ident.get_user = Mock() fake_id = utils.random_unicode() ret = ident.find_user_by_id(fake_id) ident.get_user.assert_called_with(user_id=fake_id) def test_find_user_fail_rax(self): ident = self.rax_identity_class() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp.status_code = 404 ident.method_get = Mock(return_value=(resp, resp.json())) fake_user = utils.random_unicode() self.assertRaises(exc.NotFound, ident.get_user, username=fake_user) def test_find_user_fail_base(self): ident = self.identity fake = utils.random_unicode() self.assertRaises(NotImplementedError, ident.find_user_by_name, fake) self.assertRaises(NotImplementedError, ident.find_user_by_email, fake) self.assertRaises(NotImplementedError, ident.find_user_by_id, fake) self.assertRaises(NotImplementedError, ident.get_user, fake) def test_get_user_by_id(self): ident = self.rax_identity_class() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp_body = resp.json().copy() del resp_body["users"] fake = utils.random_unicode() ident.method_get = Mock(return_value=(resp, resp_body)) ret = ident.get_user(user_id=fake) self.assertTrue(isinstance(ret, base_identity.User)) def test_get_user_by_username(self): ident = self.rax_identity_class() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp_body = resp.json().copy() del resp_body["users"] fake = utils.random_unicode() ident.method_get = Mock(return_value=(resp, resp_body)) ret = ident.get_user(username=fake) 
self.assertTrue(isinstance(ret, base_identity.User)) def test_get_user_by_email(self): ident = self.rax_identity_class() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp_body = resp.json() fake = utils.random_unicode() ident.method_get = Mock(return_value=(resp, resp_body)) ret = ident.get_user(email=fake) self.assertTrue(isinstance(ret[0], base_identity.User)) def test_get_user_missing_params(self): ident = self.rax_identity_class() resp = fakes.FakeIdentityResponse() resp.response_type = "users" ident.method_get = Mock(return_value=(resp, resp.json())) self.assertRaises(ValueError, ident.get_user) def test_get_user_not_found(self): ident = self.rax_identity_class() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp_body = resp.json().copy() del resp_body["users"] del resp_body["user"] fake = utils.random_unicode() ident.method_get = Mock(return_value=(resp, resp_body)) self.assertRaises(exc.NotFound, ident.get_user, username=fake) def test_create_user(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp.status_code = 201 ident.method_post = Mock(return_value=(resp, resp.json())) fake_name = utils.random_unicode() fake_email = utils.random_unicode() fake_password = utils.random_unicode() ident.create_user(fake_name, fake_email, fake_password) cargs = ident.method_post.call_args self.assertEqual(len(cargs), 2) self.assertEqual(cargs[0], ("users", )) data = cargs[1]["data"]["user"] self.assertEqual(data["username"], fake_name) self.assertTrue(fake_password in data.values()) def test_create_user_not_authorized(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp.status_code = 401 ident.method_post = Mock(return_value=(resp, resp.json())) fake_name = utils.random_unicode() fake_email = utils.random_unicode() fake_password = <PASSWORD>() self.assertRaises(exc.AuthorizationFailure, 
ident.create_user, fake_name, fake_email, fake_password) def test_create_user_duplicate(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp.status_code = 409 ident.method_post = Mock(return_value=(resp, resp.json())) fake_name = utils.random_unicode() fake_email = utils.random_unicode() fake_password = utils.random_unicode() self.assertRaises(exc.DuplicateUser, ident.create_user, fake_name, fake_email, fake_password) def test_create_user_bad_email(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp.status_code = 400 resp_body = {"badRequest": {"message": "Expecting valid email address"}} ident.method_post = Mock(return_value=(resp, resp_body)) fake_name = utils.random_unicode() fake_email = utils.random_unicode() fake_password = utils.random_unicode() self.assertRaises(exc.InvalidEmail, ident.create_user, fake_name, fake_email, fake_password) def test_create_user_not_found(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp.status_code = 404 ident.method_post = Mock(return_value=(resp, resp.json())) fake_name = utils.random_unicode() fake_email = utils.random_unicode() fake_password = utils.random_unicode() self.assertRaises(exc.AuthorizationFailure, ident.create_user, fake_name, fake_email, fake_password) def test_create_user_other(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp.status_code = 400 resp_body = {"badRequest": {"message": "fake"}} ident.method_post = Mock(return_value=(resp, resp_body)) fake_name = utils.random_unicode() fake_email = utils.random_unicode() fake_password = <PASSWORD>.random_unicode() self.assertRaises(exc.BadRequest, ident.create_user, fake_name, fake_email, fake_password) def test_update_user(self): for cls in 
self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "users" ident.method_put = Mock(return_value=(resp, resp.json())) fake_name = utils.random_unicode() fake_email = utils.random_unicode() fake_username = utils.random_unicode() fake_uid = utils.random_unicode() fake_region = utils.random_unicode() fake_enabled = random.choice((True, False)) kwargs = {"email": fake_email, "username": fake_username, "uid": fake_uid, "enabled": fake_enabled} if isinstance(ident, self.rax_identity_class): kwargs["defaultRegion"] = fake_region ident.update_user(fake_name, **kwargs) cargs = ident.method_put.call_args self.assertEqual(len(cargs), 2) self.assertEqual(cargs[0], ("users/%s" % fake_name, )) data = cargs[1]["data"]["user"] self.assertEqual(data["enabled"], fake_enabled) self.assertEqual(data["username"], fake_username) self.assertTrue(fake_email in data.values()) if isinstance(ident, self.rax_identity_class): self.assertTrue(fake_region in data.values()) def test_update_user_fail(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp.status_code = 401 ident.method_put = Mock(return_value=(resp, resp.json())) fake_name = utils.random_unicode() fake_email = utils.random_unicode() fake_username = utils.random_unicode() fake_uid = utils.random_unicode() fake_region = utils.random_unicode() fake_enabled = random.choice((True, False)) kwargs = {"email": fake_email, "username": fake_username, "uid": fake_uid, "enabled": fake_enabled} if isinstance(ident, self.rax_identity_class): kwargs["defaultRegion"] = fake_region self.assertRaises(exc.AuthorizationFailure, ident.update_user, fake_name, **kwargs) def test_delete_user(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "users" ident.method_delete = Mock(return_value=(resp, resp.json())) fake_name = utils.random_unicode() ident.delete_user(fake_name) 
cargs = ident.method_delete.call_args self.assertEqual(len(cargs), 2) self.assertEqual(cargs[0], ("users/%s" % fake_name, )) def test_delete_user_not_found(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp.status_code = 404 ident.method_delete = Mock(return_value=(resp, resp.json())) fake_name = utils.random_unicode() self.assertRaises(exc.UserNotFound, ident.delete_user, fake_name) def test_delete_user_fail(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp.status_code = 401 ident.method_delete = Mock(return_value=(resp, resp.json())) fake_name = utils.random_unicode() self.assertRaises(exc.AuthorizationFailure, ident.delete_user, fake_name) def test_list_roles_for_user(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp.status_code = 200 ident.method_get = Mock(return_value=(resp, resp.json())) resp = ident.list_roles_for_user("fake") self.assertTrue(isinstance(resp, list)) role = resp[0] self.assertTrue("description" in role) self.assertTrue("name" in role) self.assertTrue("id" in role) def test_list_roles_for_user_fail(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp.status_code = 401 ident.method_get = Mock(return_value=(resp, resp.json())) self.assertRaises(exc.AuthorizationFailure, ident.list_roles_for_user, "fake") def test_list_credentials(self): ident = self.rax_identity_class() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp.status_code = 200 ident.method_get = Mock(return_value=(resp, resp.json())) fake_name = utils.random_unicode() ident.list_credentials(fake_name) cargs = ident.method_get.call_args called_uri = cargs[0][0] self.assertTrue("/credentials" in called_uri) self.assertTrue("users/%s/" % fake_name in called_uri) def 
test_list_credentials_no_user(self): ident = self.identity ident.user = fakes.FakeEntity() resp = fakes.FakeIdentityResponse() resp.response_type = "users" resp.status_code = 200 ident.method_get = Mock(return_value=(resp, resp.json())) ident.list_credentials() cargs = ident.method_get.call_args called_uri = cargs[0][0] self.assertTrue("/credentials" in called_uri) self.assertTrue("users/%s/" % ident.user.id in called_uri) def test_get_keystone_endpoint(self): ident = self.keystone_identity_class() fake_ep = utils.random_unicode() sav_setting = pyrax.get_setting pyrax.get_setting = Mock(return_value=fake_ep) ep = ident._get_auth_endpoint() self.assertEqual(ep, fake_ep) pyrax.get_setting = sav_setting def test_get_keystone_endpoint_fail(self): ident = self.keystone_identity_class() sav_setting = pyrax.get_setting pyrax.get_setting = Mock(return_value=None) self.assertRaises(exc.EndpointNotDefined, ident._get_auth_endpoint) pyrax.get_setting = sav_setting def test_get_token(self): for cls in self.id_classes.values(): ident = cls() ident.token = "<PASSWORD>" sav_valid = ident._has_valid_token sav_auth = ident.authenticate ident._has_valid_token = Mock(return_value=True) ident.authenticate = Mock() tok = ident.get_token() self.assertEqual(tok, "test_token") # Force tok = ident.get_token(force=True) ident.authenticate.assert_called_with() # Invalid token ident._has_valid_token = Mock(return_value=False) ident.authenticated = False tok = ident.get_token() ident.authenticate.assert_called_with() ident._has_valid_token = sav_valid ident.authenticate = sav_auth def test_has_valid_token(self): savrequest = pyrax.http.request pyrax.http.request = Mock(return_value=(fakes.FakeIdentityResponse(), fakes.fake_identity_response)) for cls in self.id_classes.values(): ident = cls() if cls is self.keystone_identity_class: # Necessary for testing to avoid NotImplementedError. 
utils.add_method(ident, lambda self: "", "_get_auth_endpoint") ident.authenticate() valid = ident._has_valid_token() self.assertTrue(valid) ident.expires = datetime.datetime.now() - datetime.timedelta(1) valid = ident._has_valid_token() self.assertFalse(valid) ident = self._get_clean_identity() valid = ident._has_valid_token() self.assertFalse(valid) pyrax.http.request = savrequest def test_list_token(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "tokens" ident.method_get = Mock(return_value=(resp, resp.json())) tokens = ident.list_tokens() ident.method_get.assert_called_with("tokens/%s" % ident.token, admin=True) self.assertTrue("token" in tokens) def test_list_token_fail(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "tokens" resp.status_code = 403 ident.method_get = Mock(return_value=(resp, resp.json())) self.assertRaises(exc.AuthorizationFailure, ident.list_tokens) def test_check_token(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "tokens" ident.method_head = Mock(return_value=(resp, resp.json())) valid = ident.check_token() ident.method_head.assert_called_with("tokens/%s" % ident.token, admin=True) self.assertTrue(valid) def test_check_token_fail_auth(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "tokens" resp.status_code = 403 ident.method_head = Mock(return_value=(resp, resp.json())) self.assertRaises(exc.AuthorizationFailure, ident.check_token) def test_check_token_fail_valid(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "tokens" resp.status_code = 404 ident.method_head = Mock(return_value=(resp, resp.json())) valid = ident.check_token() ident.method_head.assert_called_with("tokens/%s" % ident.token, admin=True) 
self.assertFalse(valid) def test_revoke_token(self): for cls in self.id_classes.values(): ident = cls() resp = fakes.FakeIdentityResponse() resp.response_type = "tokens" token = ident.token = utils.random_unicode() ident.method_delete = Mock(return_value=(resp,
<filename>aea/helpers/dialogue/base.py # -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2018-2019 Fetch.AI Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------------ """ This module contains the classes required for dialogue management. - DialogueLabel: The dialogue label class acts as an identifier for dialogues. - Dialogue: The dialogue class maintains state of a dialogue and manages it. - Dialogues: The dialogues class keeps track of all dialogues. """ import itertools import secrets from abc import ABC, abstractmethod from enum import Enum from typing import Callable, Dict, FrozenSet, List, Optional, Set, Tuple, Type, cast from aea.mail.base import Address from aea.protocols.base import Message class DialogueLabel: """The dialogue label class acts as an identifier for dialogues.""" NONCE_BYTES_NB = 32 def __init__( self, dialogue_reference: Tuple[str, str], dialogue_opponent_addr: Address, dialogue_starter_addr: Address, ) -> None: """ Initialize a dialogue label. :param dialogue_reference: the reference of the dialogue. :param dialogue_opponent_addr: the addr of the agent with which the dialogue is kept. :param dialogue_starter_addr: the addr of the agent which started the dialogue. 
:return: None """ self._dialogue_reference = dialogue_reference self._dialogue_opponent_addr = dialogue_opponent_addr self._dialogue_starter_addr = dialogue_starter_addr @property def dialogue_reference(self) -> Tuple[str, str]: """Get the dialogue reference.""" return self._dialogue_reference @property def dialogue_starter_reference(self) -> str: """Get the dialogue starter reference.""" return self._dialogue_reference[0] @property def dialogue_responder_reference(self) -> str: """Get the dialogue responder reference.""" return self._dialogue_reference[1] @property def dialogue_opponent_addr(self) -> str: """Get the address of the dialogue opponent.""" return self._dialogue_opponent_addr @property def dialogue_starter_addr(self) -> str: """Get the address of the dialogue starter.""" return self._dialogue_starter_addr def __eq__(self, other) -> bool: """Check for equality between two DialogueLabel objects.""" if isinstance(other, DialogueLabel): return ( self.dialogue_reference == other.dialogue_reference and self.dialogue_starter_addr == other.dialogue_starter_addr and self.dialogue_opponent_addr == other.dialogue_opponent_addr ) return False def __hash__(self) -> int: """Turn object into hash.""" return hash( ( self.dialogue_reference, self.dialogue_opponent_addr, self.dialogue_starter_addr, ) ) @property def json(self) -> Dict: """Return the JSON representation.""" return { "dialogue_starter_reference": self.dialogue_starter_reference, "dialogue_responder_reference": self.dialogue_responder_reference, "dialogue_opponent_addr": self.dialogue_opponent_addr, "dialogue_starter_addr": self.dialogue_starter_addr, } @classmethod def from_json(cls, obj: Dict[str, str]) -> "DialogueLabel": """Get dialogue label from json.""" dialogue_label = DialogueLabel( ( cast(str, obj.get("dialogue_starter_reference")), cast(str, obj.get("dialogue_responder_reference")), ), cast(str, obj.get("dialogue_opponent_addr")), cast(str, obj.get("dialogue_starter_addr")), ) return 
dialogue_label def get_incomplete_version(self) -> "DialogueLabel": """Get the incomplete version of the label.""" dialogue_label = DialogueLabel( (self.dialogue_starter_reference, Dialogue.OPPONENT_STARTER_REFERENCE), self.dialogue_opponent_addr, self.dialogue_starter_addr, ) return dialogue_label def __str__(self): """Get the string representation.""" return "{}_{}_{}_{}".format( self.dialogue_starter_reference, self.dialogue_responder_reference, self.dialogue_opponent_addr, self.dialogue_starter_addr, ) @classmethod def from_str(cls, obj: str) -> "DialogueLabel": """Get the dialogue label from string representation.""" ( dialogue_starter_reference, dialogue_responder_reference, dialogue_opponent_addr, dialogue_starter_addr, ) = obj.split("_") dialogue_label = DialogueLabel( (dialogue_starter_reference, dialogue_responder_reference), dialogue_opponent_addr, dialogue_starter_addr, ) return dialogue_label class Dialogue(ABC): """The dialogue class maintains state of a dialogue and manages it.""" STARTING_MESSAGE_ID = 1 STARTING_TARGET = 0 OPPONENT_STARTER_REFERENCE = "" class Rules: """This class defines the rules for the dialogue.""" def __init__( self, initial_performatives: FrozenSet[Message.Performative], terminal_performatives: FrozenSet[Message.Performative], valid_replies: Dict[Message.Performative, FrozenSet[Message.Performative]], ) -> None: """ Initialize a dialogue. :param initial_performatives: the set of all initial performatives. :param terminal_performatives: the set of all terminal performatives. :param valid_replies: the reply structure of speech-acts. :return: None """ self._initial_performatives = initial_performatives self._terminal_performatives = terminal_performatives self._valid_replies = valid_replies @property def initial_performatives(self) -> FrozenSet[Message.Performative]: """ Get the performatives one of which the terminal message in the dialogue must have. 
:return: the valid performatives of an terminal message """ return self._initial_performatives @property def terminal_performatives(self) -> FrozenSet[Message.Performative]: """ Get the performatives one of which the terminal message in the dialogue must have. :return: the valid performatives of an terminal message """ return self._terminal_performatives @property def valid_replies( self, ) -> Dict[Message.Performative, FrozenSet[Message.Performative]]: """ Get all the valid performatives which are a valid replies to performatives. :return: the full valid reply structure. """ return self._valid_replies def get_valid_replies( self, performative: Message.Performative ) -> FrozenSet[Message.Performative]: """ Given a `performative`, return the list of performatives which are its valid replies in a dialogue. :param performative: the performative in a message :return: list of valid performative replies """ assert ( performative in self.valid_replies ), "this performative '{}' is not supported".format(performative) return self.valid_replies[performative] class Role(Enum): """This class defines the agent's role in a dialogue.""" def __str__(self): """Get the string representation.""" return str(self.value) class EndState(Enum): """This class defines the end states of a dialogue.""" def __str__(self): """Get the string representation.""" return str(self.value) def __init__( self, dialogue_label: DialogueLabel, message_class: Optional[Type[Message]] = None, agent_address: Optional[Address] = None, role: Optional[Role] = None, rules: Optional[Rules] = None, ) -> None: """ Initialize a dialogue. 
:param dialogue_label: the identifier of the dialogue :param agent_address: the address of the agent for whom this dialogue is maintained :param role: the role of the agent this dialogue is maintained for :param rules: the rules of the dialogue :return: None """ self._agent_address = agent_address self._incomplete_dialogue_label = dialogue_label.get_incomplete_version() self._dialogue_label = dialogue_label self._role = role self._is_self_initiated = ( dialogue_label.dialogue_opponent_addr is not dialogue_label.dialogue_starter_addr ) self._outgoing_messages = [] # type: List[Message] self._incoming_messages = [] # type: List[Message] self._rules = rules if message_class is not None: assert issubclass(message_class, Message) self._message_class = message_class @property def dialogue_label(self) -> DialogueLabel: """ Get the dialogue label. :return: The dialogue label """ return self._dialogue_label @property def incomplete_dialogue_label(self) -> DialogueLabel: """ Get the dialogue label. :return: The incomplete dialogue label """ return self._incomplete_dialogue_label @property def dialogue_labels(self) -> Set[DialogueLabel]: """ Get the dialogue labels (incomplete and complete, if it exists) :return: the dialogue labels """ return {self._dialogue_label, self._incomplete_dialogue_label} @property def agent_address(self) -> Address: """ Get the address of the agent for whom this dialogues is maintained. :return: the agent address """ assert self._agent_address is not None, "agent_address is not set." return self._agent_address @agent_address.setter def agent_address(self, agent_address: Address) -> None: """ Set the address of the agent for whom this dialogues is maintained. :param: the agent address """ self._agent_address = agent_address @property def role(self) -> "Role": """ Get the agent's role in the dialogue. :return: the agent's role """ assert self._role is not None, "Role is not set." 
return self._role @role.setter def role(self, role: "Role") -> None: """ Set the agent's role in the dialogue. :param role: the agent's role :return: None """ self._role = role @property def rules(self) -> "Rules": """ Get the dialogue rules. :return: the rules """ assert self._rules is not None, "Rules is not set." return self._rules @property def is_self_initiated(self) -> bool: """ Check whether the agent initiated the dialogue. :return: True if the agent initiated the dialogue, False otherwise """ return self._is_self_initiated @property def last_incoming_message(self) -> Optional[Message]: """ Get the last incoming message. :return: the last incoming message if it exists, None otherwise """ return self._incoming_messages[-1] if len(self._incoming_messages) > 0 else None @property def last_outgoing_message(self) -> Optional[Message]: """ Get the last outgoing message. :return: the last outgoing message if it exists, None otherwise """ return self._outgoing_messages[-1] if len(self._outgoing_messages) > 0 else None @property def last_message(self) -> Optional[Message]: """ Get the last message. :return: the last message if it exists, None otherwise """ last_message = None # type: Optional[Message] if ( self.last_incoming_message is not None and self.last_outgoing_message is not None ): last_message = ( self.last_outgoing_message if self.last_outgoing_message.message_id > self.last_incoming_message.message_id else self.last_incoming_message ) elif self.last_incoming_message is not None: last_message = self.last_incoming_message elif self.last_outgoing_message is not None: last_message = self.last_outgoing_message return last_message def get_message(self, message_id_to_find: int) -> Optional[Message]: """ Get the message whose id is 'message_id'. 
:param message_id_to_find: the id of the message :return: the message if it exists, None otherwise """ result = None # type: Optional[Message] list_of_all_messages = self._outgoing_messages + self._incoming_messages for message in list_of_all_messages: if message.message_id == message_id_to_find: result = message break return result @property def is_empty(self) -> bool: """ Check whether the dialogue is empty. :return: True if empty, False otherwise """ return len(self._outgoing_messages) == 0 and len(self._incoming_messages) == 0 def update(self, message: Message) -> bool:
open('articles.txt', 'wb') as ap: pickle.dump(X, ap) # Converting a collection of stemmed tweets to a matrix of token counts # (Konwersja kolekcji tweetów na macierz liczby tokenow) X = [' '.join(a) for a in tweets['stem']] c = CountVectorizer(token_pattern='(?u)\\<KEY>',min_df=3,max_df=0.5) # min_df=3 means "ignore terms that appear in less than 3 documents" # max_df=0.5 means "ignore terms that appear in more than 50% of the documents" dtm = c.fit(X) art = dtm.transform(X) # Function for top ten words # (funkcja zliczaca 10 najczesciej wystepujacych słów) def top_words(M,dtm,k): words = np.array(dtm.get_feature_names()) return(np.array([words[np.squeeze(np.array(np.argsort(M[i,:].todense())))[-k:]] for i in range(M.shape[0])])) top_words(art,dtm,10) # Vocabulary (słownik) vocab = c.get_feature_names() np.array(vocab[:30]) # Number of index(row) # (index tj. numer wiersza) dtm.vocabulary_['bezrobo'] # Visualize the most frequent 10 words words_cv = [(w, i, art.getcol(i).sum()) for w, i in dtm.vocabulary_.items()] words_cv = sorted(words_cv, key=lambda x: -x[2])[0:99] words_cv[:8] # Text processing - TF IDF # Tf-idf stands for term frequencey-inverse document frequency. 
It's a numerical statistic intended to reflect # How important a word is to a document or a corpus (i.e a collection of documents) X = [' '.join(a) for a in tweets['stem']] vectorizer = TfidfVectorizer(min_df=10, max_features=10000, tokenizer=tokenizer, ngram_range=(1, 1)) vz = vectorizer.fit_transform(X) # min_df is minimum number of documents that contain a term t # max_features is maximum number of unique tokens (across documents) that we'd consider # TfidfVectorizer preprocesses texts using the tokenizer we defined above vz.shape # vz is a tfidf matrix: # - its number of rows is the total number of documents (list of stemmed tweets) # - its number of columns is the total number of unique terms (tokens) across the documents vz.shape[1] # number of columns # Vocabulary (słownik) words = vectorizer.get_feature_names() words[0:5] #vectorizer.vocabulary_ # Dictionary mapping the tokens to their tfidf values tfidfy = dict(zip(vectorizer.get_feature_names(), vectorizer.idf_)) tfidfy = pd.DataFrame(columns=['tfidf']).from_dict(dict(tfidfy), orient='index') tfidfy.columns = ['tfidf'] tfidfy.head(5) # Visualisation of distribution of the tfidf scores through a histogram get_ipython().magic('matplotlib inline') tfidfy.tfidf.hist(bins=50, figsize=(15,7)) # Getting the tf-idf values of features (tokens that have the lowest tfidf scores) words_tfidf = [(w, i, vz.getcol(i).sum()) for w, i in vectorizer.vocabulary_.items()] words_tfidf = sorted(words_tfidf, key=lambda x: -x[2])[0:99] words_tfidf[:9] # Tokens that have the lowest tfidf scores # These are very common across many tweets tfidfy.sort_values(by=['tfidf'], ascending=True).head(10) # Tokens with highest tfidf scores # Less common words. 
These words carry more meaning tfidfy.sort_values(by=['tfidf'], ascending=False).head(10) # SVD - dimension reduction (redukcja wymiarów) # Tweets (documents) have more than 2000 features (see the vz shape), ie.each document has more than 2000 dimensions # Singular Value Decomposition (SVD) is to reduce the dimension of each vector to 50 and then using t-SNE to reduce the dimension from 50 to 2 # (SVD - dekompozycja, rozkład macierzy) svd = TruncatedSVD(n_components=50, random_state=0) svd_tfidf = svd.fit_transform(vz) svd_tfidf.shape svd.explained_variance_ratio_ print(svd_tfidf.shape) len(svd_tfidf[:,1]) # Reducing the dimension from 50 to 2 #(redukcja wymiaru z 50 do 2) tsne_model = TSNE(n_components=2, verbose=1, random_state=0) tsne_tfidf = tsne_model.fit_transform(svd_tfidf) tsne_tfidf.shape # Each text is now modeled by a two dimensional vector tsne_tfidf # Plotting with "Bokeh - Python interactive visualization library # By hovering on each tweets cluster, we can see groups of texts of similar keywords and thus referring to the same topic import bokeh.plotting as bp from bokeh.models import HoverTool, BoxSelectTool from bokeh.plotting import figure, show, output_notebook output_notebook() plot_tfidf = bp.figure(plot_width=900, plot_height=700, title="tf-idf clustering of tweets", tools="pan,wheel_zoom,box_zoom,reset,hover,previewsave", x_axis_type=None, y_axis_type=None, min_border=1) tfidf_df = pd.DataFrame(tsne_tfidf, columns=['x', 'y']) tfidf_df['text'] = tweets['text'] plot_tfidf.scatter(x='x', y='y', source=tfidf_df) bplt.figure() hover = plot_tfidf.select(dict(type=HoverTool)) hover.tooltips={"text": "@text"} show(plot_tfidf, notebook_handle=True) push_notebook() # K-Means clustering (algorytm k-średnich | centroidów) # K-means clustering is a method of vector quantization that is popular for cluster analysis in data mining # Aims to partition n observations into k clusters in which each observation belongs to the cluster with the nearest mean, 
# serving as a prototype of the cluster.
# MiniBatchKMeans is an alternative implementation that does incremental
# updates of the centers positions using mini-batches.
# (K-means partitions the input into a pre-set number of classes; it is a
# clustering algorithm, part of unsupervised Machine Learning.)
num_clusters = 5
kmeans_model = MiniBatchKMeans(n_clusters=num_clusters, init='k-means++',
        n_init=1, init_size=1000, batch_size=1000, verbose=False,
        max_iter=1000)
kmeans = kmeans_model.fit(vz)
kmeans_clusters = kmeans.predict(vz)
# Distance of every tweet to each of the 5 centroids.
kmeans_distances = kmeans.transform(vz)

# Five first tweets and the associated cluster
for (i, desc) in enumerate(tweets.text):
    if(i < 5):
        print("Cluster " + str(kmeans_clusters[i]) + ": " + desc +
                "(distance: " +
                str(kmeans_distances[i][kmeans_clusters[i]]) + ")")
        print('--------------------------')

# Top features (words) that describe each cluster:
# Clusters 0, 1 seem to be about low & falling unemployment in Poland
sorted_centroids = kmeans.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(num_clusters):
    print("Cluster %d:" % i, end='')
    for j in sorted_centroids[i, :10]:
        print(' %s' % terms[j], end='')
    print()

# Let's visualize the tweets, according to their distance from each centroid
# in K clusters. To do this, we need to reduce the dimensionality of
# kmeans_distances, using t-SNE again (from 5 down to 2).
tsne_kmeans = tsne_model.fit_transform(kmeans_distances)

# Colorizing each tweet according to the cluster it belongs to - using bokeh
colormap = np.array(["#6d8dca", "#69de53", "#723bca", "#c3e14c", "#c84dc9",
        "#68af4e", "#6e6cd5", "#e3be38", "#4e2d7c", "#5fdfa8", "#d34690",
        "#3f6d31", "#d44427", "#7fcdd8", "#cb4053", "#5e9981", "#803a62",
        "#9b9e39", "#c88cca", "#e1c37b", "#34223b", "#bdd8a3", "#6e3326",
        "#cfbdce", "#d07d3c", "#52697d", "#7d6d33", "#d27c88", "#36422b",
        "#b68f79"])

plot_kmeans = bp.figure(plot_width=700, plot_height=600,
        title="KMeans clustering of tweets",
        tools="pan,wheel_zoom,box_zoom,reset,hover,previewsave",
        x_axis_type=None, y_axis_type=None, min_border=1)
kmeans_df = pd.DataFrame(tsne_kmeans, columns=['x', 'y'])
kmeans_df['cluster'] = kmeans_clusters
kmeans_df['text'] = tweets['text']
plot_kmeans.scatter(x='x', y='y', color=colormap[kmeans_clusters],
        source=kmeans_df)
# NOTE(review): `bplt` is not defined in this chunk -- presumably an alias
# imported earlier; verify before running.
bplt.figure()
hover = plot_kmeans.select(dict(type=HoverTool))
hover.tooltips={"text": "@text", "cluster":"@cluster"}
show(plot_kmeans,notebook_handle=True)
push_notebook()

# Clusters are separated.
# By hovering on each one of them you can see the corresponding texts.
# They deal appx. with the same topic.
# There are some overlaps between different clusters.

# Latent Dirichlet Allocation (LDA)
# Topic modeling algorithm called LDA to uncover the latent topics in tweets.
# The number of topics needs to be specified upfront.
#logging.getLogger("lda").setLevel(logging.WARNING)
cvectorizer = CountVectorizer(min_df=3, max_features=10000,
        tokenizer=tokenizer, ngram_range=(1,1))
cvz = cvectorizer.fit_transform(tweets['text'])
n_topics = 10
n_iter = 100
lda_model = lda.LDA(n_topics=n_topics, n_iter=n_iter)
# Document-topic distribution matrix (one row per tweet).
X_topics = lda_model.fit_transform(cvz)

# We can inspect the words that are most relevant to a topic.
# Topics are dealing mostly with: low and/or falling unemployment in Poland
# (topic 2), people having jobs (topic 0,8) and growing economy (topic 4, 5).
# Topics in more detail below
n_top_words = 8
topic_summaries = []
topic_word = lda_model.topic_word_
vocab = cvectorizer.get_feature_names()
for i, topic_dist in enumerate(topic_word):
    # Take the n_top_words highest-probability words of this topic.
    topic_words = np.array(vocab)[np.argsort(topic_dist)][:-(n_top_words+1):-1]
    topic_summaries.append(' '.join(topic_words))
    print('Topic {}: {}'.format(i, ' '.join(topic_words)))

lda_model.components_
lda_model.loglikelihood()

# To visualize the tweets according to their topic distributions, we first
need to reduce the dimensionality down to 2 using t-SNE tsne_lda = tsne_model.fit_transform(X_topics) # Let's get the main topic for each tweet doc_topic = lda_model.doc_topic_ lda_keys = [] for i, tweet in enumerate(tweets['text']): lda_keys += [doc_topic[i].argmax()] # Which we'll use to colorize them: plot_lda = bp.figure(plot_width=700, plot_height=600, title="LDA topic visualization", tools="pan,wheel_zoom,box_zoom,reset,hover,previewsave", x_axis_type=None, y_axis_type=None, min_border=1) lda_df = pd.DataFrame(tsne_lda, columns=['x','y']) lda_df['text'] = tweets['text'] lda_df['topic'] = lda_keys lda_df['topic'] = lda_df['topic'].map(int) plot_lda.scatter(source=lda_df, x='x', y='y', color=colormap[lda_keys]) bplt.figure() hover = plot_lda.select(dict(type=HoverTool)) hover.tooltips={"text":"@text", "topic":"@topic"} show(plot_lda,notebook_handle=True) push_notebook() # Somewhat better separation between the topics # No dominant topic # Visualization of topics using pyLDAvis # Visualization to explore LDA topics using pyldavis lda_df['len_docs'] = tweets['tokens'].map(len) def prepareLDAData(): data = { 'vocab': vocab, 'doc_topic_dists': lda_model.doc_topic_, 'doc_lengths': list(lda_df['len_docs']), 'term_frequency':cvectorizer.vocabulary_, 'topic_term_dists': lda_model.components_ } return data ldadata = prepareLDAData() #import pyLDAvis pyLDAvis.enable_notebook() prepared_data = pyLDAvis.prepare(**ldadata) pyLDAvis.save_html(prepared_data,'pyldadavis.html') prepared_data # Paragraph Vectors (doc2vec) - gensim # "Paragraph Vector is an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents # The algorithm represents each document by a dense vector which is trained to predict words in the document." 
# Let's use doc2vec to created paragraph vectors for each tweet in the dataset tknzr = TweetTokenizer() # Let's extract the documents and tokenize them (without removing the stopwords): docs = [TaggedDocument(tknzr.tokenize(cleaning(tweets['text'])), [i]) for i, tweet in enumerate(tweets)] #docs[0] # here's what a tokenized document looks like # We train our doc2vec model with 100 dimensions, a window of size 8, a minimum word count of 5 and with 4 workers doc2vec_model = Doc2Vec(docs, size=100, window=8, min_count=5, workers=4) # Similar words to" unemployment", "falling" are: still, next, year, will be doc2vec_model.most_similar(positive=["bezrobocie", "spada"], negative=["a"]) # Let' see what each paragraph vector looks like: doc2vec_model.docvecs[0] # Now we're going to use t-SNE to reduce dimensionality and plot the tweets: doc_vectors = [doc2vec_model.docvecs[i] for i, t in enumerate(tweets[:10000])] tsne_d2v = tsne_model.fit_transform(doc_vectors) # Let's put the tweets on a 2D plane: plot_d2v = bp.figure(plot_width=900, plot_height=700, title="Tweets (doc2vec)", tools="pan,wheel_zoom,box_zoom,reset,hover,previewsave", x_axis_type=None, y_axis_type=None, min_border=1) plot_d2v.scatter(x=tsne_d2v[:,0], y=tsne_d2v[:,1], color=colormap[lda_keys][:10000], source=bp.ColumnDataSource({ "tweet": tweets['text'], "processed": tweets['cleaning'] })) # tweets['cleaning'] hover = plot_d2v.select(dict(type=HoverTool)) hover.tooltips={"tweet": "@tweet (processed: \"@processed\")"} show(plot_d2v, notebook_handle=True) push_notebook() # LDA in gensim module # Creating the term dictionary of our courpus, where every unique term is assigned an index dictionary = corpora.Dictionary(tweets['stem']) #dictionary.save('dictionary.dict') # Converting list of documents (corpus) into Document Term Matrix using dictionary prepared above corpus = [dictionary.doc2bow(text) for
<gh_stars>1-10
from __future__ import absolute_import, unicode_literals
from six import iteritems, string_types
from past.builtins import unicode
import types
import copy
from datetime import date, datetime
from uuid import uuid4
import json
from elasticsearch import Elasticsearch, NotFoundError, helpers, client
from . import config, querybuilder, qdsl, identity
from .config import logger
from .util import unset, all_subclasses
from .relationship import relationship
from .es_types import *  # implements elastic search types


# Raised when an attribute is set on a model that was already deleted.
class ObjectDeletedError(Exception):
    pass


# Raised when no results are found for one()
class NoResultsFound(Exception):
    pass


# Implements callbacks across objects.
# Callbacks are stored in a single class-level dict keyed first by the
# *class name* and then by callback type, so each subclass gets its own
# namespace inside the shared mapping.
class VWCallback(object):
    _callbacks = {}

    @classmethod
    def register_callback(cls, cbtype, callback):
        # Register `callback` (any callable) under `cbtype` for this class.
        if cls.__name__ not in cls._callbacks:
            cls._callbacks[cls.__name__] = {}
        if cbtype not in cls._callbacks[cls.__name__]:
            cls._callbacks[cls.__name__][cbtype] = []
        if not callable(callback):
            raise ValueError('parameter 2 to register_callback() must be \
callable')
        cls._callbacks[cls.__name__][cbtype].append(callback)

    @classmethod
    def deregister_callback(cls, cbtype, callback_name):
        # Remove a callback either by identity or by its __name__.
        # Missing class/cbtype keys are silently ignored.
        try:
            for cb in cls._callbacks[cls.__name__][cbtype]:
                if cb == callback_name or cb.__name__ == callback_name:
                    cls._callbacks[cls.__name__][cbtype].remove(cb)
                    break
        except KeyError:
            pass

    def execute_callbacks(self, cbtype, argument=None, **kwargs):
        # Run instance callbacks in registration order, threading `argument`
        # through each one (each callback receives the previous result).
        try:
            for cb in self._callbacks[self.__class__.__name__][cbtype]:
                argument = cb(self, argument, **kwargs)
        except KeyError:
            pass  # no callbacks by this name.
        return argument

    @classmethod
    def execute_class_callbacks(cls, cbtype, argument=None, **kwargs):
        # Same as execute_callbacks() but without an instance argument.
        try:
            for cb in cls._callbacks[cls.__name__][cbtype]:
                argument = cb(argument, **kwargs)
        except KeyError:
            pass  # no callbacks by this name.
        return argument


# Base model class mapping an object to an Elasticsearch document.
# Attribute reads/writes are intercepted (__getattribute__/__setattr__) and
# backed by the internal self._document dict.
class VWBase(identity.VWIdentity, VWCallback):
    # connects to ES
    _watch = False
    _needs_update = False
    id = ''
    __index__ = None

    def __init__(self, **kwargs):
        # the internal document
        self._document = {}

        # pickling off by default. Set by __getstate__ and __setstate__ when
        # the object is pickled/unpickled. Allows all values to be set
        self._pickling = False

        # relationships should not be executed when called from init (EVAR)
        self._no_ex = True

        if kwargs.get('_set_by_query'):
            self._new = False
            self._set_by_query = True
        else:
            self._new = True
            self.execute_callbacks('before_manual_create_model')

        self._needs_update = False
        self._watch = True

        # connect using defaults or override with kwargs
        self._es = Elasticsearch(config.dsn, **config.connection_params)
        self._deleted = False

        if self.__index__ is None:
            self.__index__ = config.default_index

        for k in dir(self):
            v = getattr(self, k)
            # skip functions and special variables
            if not isinstance(v, types.MethodType) and not k[0] == '_':
                # check if we were called with a variable. If so set
                try:
                    v = kwargs[k]
                except KeyError:
                    pass
                setattr(self, k, v)

        if 'id' not in kwargs:
            self.id = str(uuid4())

        # make sure we're ready for changes
        self._set_by_query = False
        self._no_ex = False

        if self._new:
            self.execute_callbacks('after_manual_create_model')

    # customizations for pickling
    def __getstate__(self):
        # mark as pickling
        self._pickling = True
        # copy the __dict__. Need copy so we don't
        # break things when flags are removed
        retval = {}
        for k, v in iteritems(self.__dict__):
            # the ES client and the pickling flag are not picklable state
            if k != '_es' and k != '_pickling':
                retval[k] = copy.deepcopy(v)
        self._pickling = False
        return retval

    def __setstate__(self, state):
        self._pickling = True
        for k, v in iteritems(state):
            setattr(self, k, v)
        # recreate the _es connection (doesn't reset for some reason)
        self._es = Elasticsearch(config.dsn, **config.connection_params)
        self._pickling = False

    def __getattribute__(self, name):
        # ok this is much funky
        # Reads prefer the value stored in self._document; class defaults are
        # used (and copied into the document) otherwise.
        no_ex = False
        try:
            no_ex = super(VWBase, self).__getattribute__('_no_ex')
        except AttributeError:
            pass

        v = unset
        doc = None
        try:
            doc = super(VWBase, self).__getattribute__('_document')
            if name in doc:
                v = doc.get(name, unset)
        except AttributeError:
            pass

        # NOTE(review): `if not v:` also triggers for falsy document values
        # (0, '', empty list) — confirm that falling back to the class default
        # in those cases is intended.
        if not v:
            default_v = super(VWBase, self).__getattribute__(name)
            # list and dict objects attributes cannot be set to None. Others can
            if v is unset or isinstance(default_v, list) or isinstance(default_v, dict):
                v = default_v

        # instance attribute was becoming a reference to the class
        # attribute. Not what we wanted, make a copy
        if doc:
            if not isinstance(v, types.MethodType) and not name[0] == '_':
                v = copy.deepcopy(v)
                self._document[name] = v
                return self._document[name]

        # we want to keep the relationships if set_by_query in the collection
        # so we only execute with direct access
        # (we'll see, it might have an unintended side-effect)
        # Relationships are not documented and really haven't been tested!
        if isinstance(v, relationship) and not no_ex:
            return v.execute(self)
        elif isinstance(v, string_types):
            # strings matching the stored date formats are transparently
            # converted back to datetime/date objects
            try:
                try:
                    return datetime.strptime(v, '%Y-%m-%dT%H:%M:%S')
                except ValueError:
                    return datetime.strptime(v, '%Y-%m-%d').date()
            except (ValueError, AttributeError, TypeError):
                return v
        else:
            return v

    # EXPERIMENTAL
    def __set_relationship_value(self, name, value):
        # Assign `value` to a relationship attribute, updating the local
        # key column(s) that back the relation.
        curr_value = self.__get_current_value(name)

        # TODO ... this stuff is probably going to have to be rethought
        currparams = curr_value.get_relational_params(self)
        newparams = curr_value.get_reverse_params(self, value)

        # validate the assigned value against the relationship cardinality
        if isinstance(value, list) and curr_value.reltype == 'many':
            if len(value) > 0:
                for v in value:
                    if not isinstance(v, VWBase):
                        raise TypeError('Update to %s must be a list of \
objects that extend VWBase' % name)
        elif isinstance(value, VWBase) or value is None:
            pass
        else:
            raise TypeError('Update to %s must extend VWBase or be None' % name)

        for k, v in iteritems(currparams):
            # if left hand value is a list
            if isinstance(v, list):
                newlist = []
                # if our new value is a list we should overwrite
                if isinstance(value, list):
                    newlist = map(lambda item: getattr(item, k), value)
                # otherwise append
                else:
                    # had to reset the list because I can't directly
                    # append
                    newlist = super(VWBase, self).__getattribute__(k)
                object.__setattr__(self, k, newlist)
            # if left hand value is something else
            else:
                # if we're setting a list then check that the relationship
                # type is "many"
                if isinstance(value, list) and curr_value.reltype == 'many':
                    # if the length of the list is 0 we will null the value
                    if len(value) < 1:
                        relation_value = ''
                    else:
                        # the related column on all items would have
                        # to be the same (and really there should only
                        # be one but we're going to ignore that for now)
                        relation_value = getattr(value[0], k)
                    object.__setattr__(self, k, relation_value)
                else:
                    # set the related key to the related key value (v)
                    if value:
                        object.__setattr__(self, k, v)

    def __get_current_value(self, name):
        # Raw attribute read bypassing the __getattribute__ magic;
        # returns None when the attribute does not exist.
        try:
            return super(VWBase, self).__getattribute__(name)
        except AttributeError:
            return None

    def __set_document_value(self, name, value):
        # Store `value` in self._document, coercing to the declared ESType
        # where possible and serializing dates/booleans.
        if name[0] == '_':
            # special rules for names with underscores.
            # setting the _ values will not trigger an update.
            if (name not in dir(self)
                    or name in ['_set_by_query', '_deleted', '_watch', '_new',
                                '_no_ex', '_pickling', '_document', '_callbacks']
                    or self._pickling):
                object.__setattr__(self, name, value)  # not copied
        else:
            curr_value = create_es_type(self.__get_current_value(name))  # create as an es_type

            # decide whether the incoming value still needs ESType coercion
            try:
                if type(value).__class__ == ESType:
                    set_value_cls = False
                elif value is None:
                    set_value_cls = False
                else:
                    set_value_cls = True
            except AttributeError:
                if value is None:
                    set_value_cls = False
                else:
                    set_value_cls = True

            if set_value_cls:
                # strict typing can be enabled per-model (__strict_types__)
                # or globally (config.strict_types); default off
                type_enforcement = False
                try:
                    type_enforcement = self.__strict_types__
                except AttributeError:
                    try:
                        type_enforcement = config.strict_types
                    except AttributeError:
                        type_enforcement = False

                try:
                    if type(curr_value).__class__ == ESType:
                        cls = curr_value.__class__
                        params = curr_value.es_args()
                        # try to set the value as the same class.
                        # NOTE(review): bare `except:` below swallows
                        # everything, including KeyboardInterrupt — consider
                        # narrowing.
                        try:
                            value = cls(value, **params)
                        except:
                            # value didn't set. Try set as es_type
                            test_value = create_es_type(value)

                            # dates and times are special
                            if isinstance(test_value, DateTime):
                                if isinstance(curr_value, DateTime):
                                    value = DateTime(test_value.year, test_value.month, test_value.day,
                                                     test_value.hour, test_value.minute, test_value.second,
                                                     test_value.microsecond, test_value.tzinfo, **params)
                                elif isinstance(curr_value, Date):
                                    value = Date(test_value.year, test_value.month, test_value.day, **params)
                                else:
                                    value = test_value
                            else:
                                value = test_value

                        if type_enforcement:
                            try:
                                if value.__class__ != curr_value.__class__:
                                    raise TypeError('strict type enforcement is enabled. %s must be set with %s' % (
                                        name, curr_value.__class__.__name__))
                            except:
                                # errors where value isn't even a class
                                # will raise their own exception.
                                # Catch here to avoid attribute errors
                                # from this block being passed along below
                                raise
                except AttributeError:
                    # curr_value couldn't be converted to an ESType
                    # we just fall back to regular types.
                    # if ES has an issue it will throw its own exception.
                    pass

            # just set the field on the document
            if isinstance(value, DateTime) or isinstance(value, datetime):
                self._document[name] = value.strftime('%Y-%m-%dT%H:%M:%S')
            elif isinstance(value, Date) or isinstance(value, date):
                self._document[name] = value.strftime('%Y-%m-%d')
            elif isinstance(value, Boolean):
                self._document[name] = bool(value)
            else:
                self._document[name] = value

            if self._watch:
                object.__setattr__(self, '_needs_update', True)
                object.__setattr__(self, '_watch', False)

    def __setattr__(self, name, value):
        # Writes after deletion are an error.
        if '_deleted' in dir(self) and self._deleted:
            raise ObjectDeletedError

        # we need to do some magic if the current value is a relationship
        curr_value = self.__get_current_value(name)
        if (isinstance(curr_value, relationship)
                and not isinstance(value, relationship)):
            self.__set_relationship_value(name, value)
        # attribute is NOT a relationship
        else:
            self.__set_document_value(name, value)

    def commit(self, **kwargs):
        # save in the db
        # (deletes when flagged, otherwise indexes the current document)
        if self._deleted and hasattr(self, 'id') and self.id:
            self.execute_callbacks('on_delete')
            self._es.delete(id=self.id, index=self.__index__, doc_type=self.__type__)
        else:
            self.execute_callbacks('before_commit')
            idx = self.__index__
            # NOTE(review): method truncated at this chunk boundary.
            doc_type =
<gh_stars>10-100
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import mock
from flexmock import flexmock, flexmock_teardown
from hamcrest import assert_that, has_length, equal_to, is_, none, empty
from netaddr import IPNetwork
from netaddr.ip import IPAddress

from netman.adapters.switches import brocade_factory_ssh, brocade_factory_telnet
from netman.adapters.switches.brocade import Brocade, parse_if_ranges
from netman.adapters.switches.util import SubShell
from netman.core.objects.access_groups import IN, OUT
from netman.core.objects.exceptions import IPNotAvailable, UnknownVlan, UnknownIP, UnknownAccessGroup, BadVlanNumber, \
    BadVlanName, UnknownInterface, TrunkVlanNotSet, UnknownVrf, VlanVrfNotSet, VrrpAlreadyExistsForVlan, BadVrrpPriorityNumber, BadVrrpGroupNumber, \
    BadVrrpTimers, BadVrrpTracking, NoIpOnVlanForVrrp, VrrpDoesNotExistForVlan, UnknownDhcpRelayServer, DhcpRelayServerAlreadyExists, \
    VlanAlreadyExist, InvalidAccessGroupName, IPAlreadySet
from netman.core.objects.interface_states import OFF, ON
from netman.core.objects.port_modes import ACCESS, TRUNK
from netman.core.objects.switch_descriptor import SwitchDescriptor


class BrocadeTest(unittest.TestCase):
    """Unit tests for the Brocade switch adapter.

    Every test stubs the switch CLI through a flexmock shell: each
    should_receive("do") expectation pins the exact command the adapter is
    expected to send and the canned CLI output it gets back.
    """

    def setUp(self):
        self.switch = Brocade(SwitchDescriptor(model='brocade', hostname="my.hostname"), None)
        SubShell.debug = True
        self.shell_mock = flexmock()
        self.switch.shell = self.shell_mock

    def tearDown(self):
        flexmock_teardown()

    def test_switch_has_a_logger_configured_with_the_switch_name(self):
        assert_that(self.switch.logger.name, is_(Brocade.__module__ + ".my.hostname"))

    def test_ip_redirect_enable(self):
        self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
            vlan_with_vif_display(1234, 999, name="Shizzle")
        )
        self.shell_mock.should_receive("do").with_args("show running-config interface ve 999").once().ordered().and_return([
            "interface ve 999",
            "!",
        ])

        self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
        self.shell_mock.should_receive("do").with_args("interface ve 999").and_return([]).once().ordered()
        self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
        self.shell_mock.should_receive("do").with_args("ip redirect").once().ordered()
        self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice()

        self.switch.set_vlan_icmp_redirects_state(1234, True)

    def test_ip_redirect_disable(self):
        self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
            vlan_with_vif_display(1234, 999, name="Shizzle")
        )
        self.shell_mock.should_receive("do").with_args("show running-config interface ve 999").once().ordered().and_return([
            "interface ve 999",
            "!",
        ])

        self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
        self.shell_mock.should_receive("do").with_args("interface ve 999").and_return([]).once().ordered()
        self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
        self.shell_mock.should_receive("do").with_args("no ip redirect").once().ordered()
        self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice()

        self.switch.set_vlan_icmp_redirects_state(1234, False)

    def test_set_vlan_icmp_redirects_state_without_interface_creates_it(self):
        self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
            vlan_with_vif_display(1234, 999, name="Shizzle")
        )
        self.shell_mock.should_receive("do").with_args("show running-config interface ve 999").once().ordered().and_return([
            "Error - ve 999 was not configured"
        ])

        self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
        self.shell_mock.should_receive("do").with_args("interface ve 999").and_return([]).once().ordered()
        self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
        self.shell_mock.should_receive("do").with_args("no ip redirect").once().ordered()
        self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice()

        self.switch.set_vlan_icmp_redirects_state(1234, False)

    def test_set_vlan_icmp_redirects_state_unknown_vlan(self):
        self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return([
            "Error: vlan 1234 is not configured"
        ])

        with self.assertRaises(UnknownVlan) as expect:
            self.switch.set_vlan_icmp_redirects_state(1234, False)

        assert_that(str(expect.exception), equal_to("Vlan 1234 not found"))

    def test_get_vlans(self):
        self.shell_mock.should_receive("do").with_args("show running-config vlan | begin vlan").once().ordered().and_return([
            "vlan 1 name DEFAULT-VLAN",
            # NOTE(review): the two adjacent string literals below concatenate
            # into a single element — probably a missing comma in the original.
            ""
            " no untagged ethe 1/1 ethe 1/3 to 1/22",
            "!",
            "vlan 201",
            " tagged ethe 1/1",
            " router-interface ve 201",
            "!",
            "vlan 2222 name your-name-is-way-too-long-for-t",
            " tagged ethe 1/1",
            " untagged ethe 1/2",
            "!",
            "vlan 3333 name some-name",
            "!",
            "!"
        ])
        self.shell_mock.should_receive("do").with_args("show running-config interface").once()\
            .ordered().and_return([
                'interface ve 428',
                ' port-name "My Awesome Port Name"',
                ' ip address 10.241.0.33/27',
                ' ip access-group ACL-IN in',
                ' ip access-group ACL-OUT out',
                '!',
                'interface ve 201',
                ' vrf forwarding SHIZZLE',
                ' ip address 1.1.1.1/24',
                ' ip address 2.1.1.1/27',
                ' ip address 1.1.1.9/24 secondary',
                ' ip helper-address 10.10.10.1',
                ' ip helper-address 10.10.10.2',
                ' ip vrrp-extended auth-type simple-text-auth VLAN201',
                ' ip vrrp-extended vrid 1',
                '  backup priority 110 track-priority 50',
                '  ip-address 1.1.1.2',
                '  hello-interval 5',
                '  dead-interval 15',
                '  advertise backup',
                '  track-port ethernet 1/1',
                '  activate',
                ' ip vrrp-extended vrid 2',
                '  backup priority 110 track-priority 50',
                '  ip-address 1.1.1.3',
                '  ip-address 1.1.1.4',
                '  hello-interval 5',
                '  dead-interval 15',
                '  advertise backup',
                '  track-port ethernet 1/1',
                '  activate',
                # NOTE(review): adjacent literals concatenate to
                # ' no ip redirect!' — likely a missing comma in the original.
                ' no ip redirect'
                '!',
                'interface ve 1203',
                '!',
                'interface ve 3993',
                ' port-name Another-port-name',
                ' ip address 4.4.4.0/27',
                '!'])

        vlan1, vlan201, vlan2222, vlan3333 = self.switch.get_vlans()

        assert_that(vlan1.number, equal_to(1))
        assert_that(vlan1.name, equal_to("default"))
        assert_that(vlan1.ips, has_length(0))
        assert_that(vlan1.vrf_forwarding, is_(none()))

        assert_that(vlan201.number, equal_to(201))
        assert_that(vlan201.name, equal_to(None))
        assert_that(vlan201.ips, has_length(3))
        assert_that(vlan201.vrf_forwarding, is_("SHIZZLE"))
        assert_that(vlan201.icmp_redirects, equal_to(False))

        assert_that(vlan2222.number, equal_to(2222))
        assert_that(vlan2222.name, equal_to("your-name-is-way-too-long-for-t"))
        assert_that(vlan2222.ips, has_length(0))
        assert_that(vlan2222.icmp_redirects, equal_to(True))

        assert_that(vlan3333.number, equal_to(3333))
        assert_that(vlan3333.name, equal_to("some-name"))
        assert_that(vlan3333.ips, has_length(0))

        vrrp_group1, vrrp_group2 = vlan201.vrrp_groups
        assert_that(len(vrrp_group1.ips), equal_to(1))
        assert_that(vrrp_group1.ips[0], equal_to(IPAddress('1.1.1.2')))
        assert_that(vrrp_group1.hello_interval, equal_to(5))
        assert_that(vrrp_group1.dead_interval, equal_to(15))
        assert_that(vrrp_group1.priority, equal_to(110))
        assert_that(vrrp_group1.track_id, equal_to('ethernet 1/1'))
        assert_that(vrrp_group1.track_decrement, equal_to(50))
        assert_that(len(vrrp_group2.ips), equal_to(2))
        assert_that(vrrp_group2.ips[0], equal_to(IPAddress('1.1.1.3')))
        assert_that(vrrp_group2.ips[1], equal_to(IPAddress('1.1.1.4')))
        assert_that(vrrp_group2.hello_interval, equal_to(5))
        assert_that(vrrp_group2.dead_interval, equal_to(15))
        assert_that(vrrp_group2.priority, equal_to(110))
        assert_that(vrrp_group2.track_id, equal_to('ethernet 1/1'))
        assert_that(vrrp_group2.track_decrement, equal_to(50))

        assert_that(len(vlan201.dhcp_relay_servers), equal_to(2))
        assert_that(str(vlan201.dhcp_relay_servers[0]), equal_to('10.10.10.1'))
        assert_that(str(vlan201.dhcp_relay_servers[1]), equal_to('10.10.10.2'))

    def test_get_vlan_with_no_interface(self):
        self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return(
            vlan_display(1750)
        )

        vlan = self.switch.get_vlan(1750)

        assert_that(vlan.number, is_(1750))
        assert_that(vlan.name, is_(None))
        assert_that(vlan.access_groups[IN], is_(none()))
        assert_that(vlan.access_groups[OUT], is_(none()))
        assert_that(vlan.vrf_forwarding, is_(none()))
        assert_that(vlan.ips, is_(empty()))
        assert_that(vlan.vrrp_groups, is_(empty()))
        assert_that(vlan.dhcp_relay_servers, is_(empty()))

    def test_get_vlan_with_an_empty_interface(self):
        self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return(
            vlan_with_vif_display(1750, 999, name="Shizzle")
        )
        self.shell_mock.should_receive("do").with_args("show running-config interface ve 999").once().ordered().and_return([
            "interface ve 999",
            "!",
        ])

        vlan = self.switch.get_vlan(1750)

        assert_that(vlan.number, is_(1750))
        assert_that(vlan.name, is_("Shizzle"))
        assert_that(vlan.access_groups[IN], is_(none()))
        assert_that(vlan.access_groups[OUT], is_(none()))
        assert_that(vlan.vrf_forwarding, is_(none()))
        assert_that(vlan.ips, is_(empty()))
        assert_that(vlan.vrrp_groups, is_(empty()))
        assert_that(vlan.dhcp_relay_servers, is_(empty()))

    def test_get_vlan_with_a_full_interface(self):
        self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return(
            vlan_with_vif_display(1750, 1750, name="Shizzle")
        )
        self.shell_mock.should_receive("do").with_args("show running-config interface ve 1750").once().ordered().and_return([
            "interface ve 1750",
            " vrf forwarding SHIZZLE",
            " ip address 1.1.1.1/24",
            " ip address 2.1.1.1/27",
            " ip address 1.1.1.9/24 secondary",
            " ip access-group ACL-IN in",
            " ip access-group ACL-OUT out",
            " ip helper-address 10.10.10.1",
            " ip helper-address 10.10.10.2",
            " ip vrrp-extended auth-type simple-text-auth VLAN201",
            " ip vrrp-extended vrid 1",
            "  backup priority 110 track-priority 50",
            "  ip-address 1.1.1.2",
            "  hello-interval 5",
            "  dead-interval 15",
            "  advertise backup",
            "  track-port ethernet 1/1",
            "  activate",
            " ip vrrp-extended vrid 2",
            "  backup priority 110 track-priority 50",
            "  ip-address 1.1.1.3",
            "  ip-address 1.1.1.4",
            "  hello-interval 5",
            "  dead-interval 15",
            "  advertise backup",
            "  track-port ethernet 1/1",
            "  activate",
            "!",
        ])

        vlan = self.switch.get_vlan(1750)

        assert_that(vlan.number, is_(1750))
        assert_that(vlan.name, is_("Shizzle"))
        assert_that(vlan.access_groups[IN], is_("ACL-IN"))
        assert_that(vlan.access_groups[OUT], is_("ACL-OUT"))
        assert_that(vlan.vrf_forwarding, is_("SHIZZLE"))
        assert_that(vlan.ips, has_length(3))
        assert_that(vlan.icmp_redirects, equal_to(True))

        vrrp_group1, vrrp_group2 = vlan.vrrp_groups
        assert_that(len(vrrp_group1.ips), equal_to(1))
        # NOTE(review): the canned config above sets ip-address 1.1.1.2, yet
        # this assertion expects 172.16.58.3 — looks like an IP-anonymization
        # artifact in this copy; confirm against the upstream test.
        assert_that(vrrp_group1.ips[0], equal_to(IPAddress('172.16.58.3')))
        assert_that(vrrp_group1.hello_interval, equal_to(5))
        assert_that(vrrp_group1.dead_interval, equal_to(15))
        assert_that(vrrp_group1.priority, equal_to(110))
        assert_that(vrrp_group1.track_id, equal_to('ethernet 1/1'))
        assert_that(vrrp_group1.track_decrement, equal_to(50))
        assert_that(len(vrrp_group2.ips), equal_to(2))
        assert_that(vrrp_group2.ips[0], equal_to(IPAddress('1.1.1.3')))
        assert_that(vrrp_group2.ips[1], equal_to(IPAddress('1.1.1.4')))
        assert_that(vrrp_group2.hello_interval, equal_to(5))
        assert_that(vrrp_group2.dead_interval, equal_to(15))
        assert_that(vrrp_group2.priority, equal_to(110))
        assert_that(vrrp_group2.track_id, equal_to('ethernet 1/1'))
        assert_that(vrrp_group2.track_decrement, equal_to(50))

        assert_that(len(vlan.dhcp_relay_servers), equal_to(2))
        assert_that(str(vlan.dhcp_relay_servers[0]), equal_to('10.10.10.1'))
        assert_that(str(vlan.dhcp_relay_servers[1]), equal_to('10.10.10.2'))

    # NOTE(review): in the three tests below the "untagged"/"tagged" test names
    # appear swapped relative to the keyword arguments actually passed to
    # vlan_display — confirm against the upstream test.
    def test_get_vlan_interface_with_untagged_interface(self):
        self.shell_mock.should_receive("do").with_args("show vlan 1").once().ordered().and_return(
            vlan_display(1, 'DEFAULT-VLAN', tagged_port_str="ethe 1/2 ethe 1/23 to 1/24")
        )

        vlan_interfaces = self.switch.get_vlan_interfaces(1)

        assert_that(vlan_interfaces, equal_to(["ethernet 1/2", "ethernet 1/23", "ethernet 1/24"]))

    def test_get_vlan_interface_with_tagged_interface(self):
        self.shell_mock.should_receive("do").with_args("show vlan 1").once().ordered().and_return(
            vlan_display(1, 'DEFAULT-VLAN', untagged_port_str="ethe 1/2")
        )

        vlan_interfaces = self.switch.get_vlan_interfaces(1)

        assert_that(vlan_interfaces, equal_to(["ethernet 1/2"]))

    def test_get_vlan_interface_with_untagged_and_tagged_interface(self):
        self.shell_mock.should_receive("do").with_args("show vlan 1").once().ordered().and_return(
            vlan_display(1, 'DEFAULT-VLAN', untagged_port_str="ethe 1/1", tagged_port_str="ethe 1/2 ethe 1/23 to 1/24")
        )

        vlan_interfaces = self.switch.get_vlan_interfaces(1)

        assert_that(vlan_interfaces, equal_to(["ethernet 1/1", "ethernet 1/2", "ethernet 1/23", "ethernet 1/24"]))

    def test_get_vlan_interface_unknown_vlan(self):
        self.shell_mock.should_receive("do").with_args("show vlan inexistent").once().ordered().and_return([
            "Error: vlan inexistent is not configured"
        ])

        with self.assertRaises(UnknownVlan):
            self.switch.get_vlan_interfaces("inexistent")

    def test_get_vlan_unknown_interface_raises(self):
        self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return([
            "Error: vlan 1750 is not configured"
        ])

        with self.assertRaises(UnknownVlan) as expect:
            self.switch.get_vlan(1750)

        assert_that(str(expect.exception), equal_to("Vlan 1750 not found"))

    def test_get_vlan_with_both_ip_and_ipv6_vrrp_groups_ipv6_is_ignored(self):
        self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return(
            vlan_with_vif_display(1750, 1750, name="Shizzle")
        )
        self.shell_mock.should_receive("do").with_args("show running-config interface ve 1750").once()\
            .ordered().and_return([
                'interface ve 1750',
                'port-name vrrp-extended vrid 42',
                ' ip address 10.241.0.33/27',
                ' no ip redirect',
                ' ip helper-address 10.10.10.1',
                ' ip helper-address 10.10.10.2',
                ' ipv6 address fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/64',
                ' ipv6 address fdf8:f53e:61e4::18/64',
                ' ipv6 nd suppress-ra',
                ' ip vrrp-extended vrid 42',
                '  backup priority 130 track-priority 20',
                '  ip-address 1.1.1.2',
                '  advertise backup',
                '  hello-interval 4',
                '  track-port ethernet 1/3',
                '  activate',
                ' ipv6 vrrp-extended vrid 43',
                '  backup priority 110 track-priority 50',
                '  ipv6-address fdf8:f53e:61e4::18',
                '  advertise backup',
                '  hello-interval 5',
                '  track-port ethernet 1/2',
                '  activate',
                '!'])

        vlan = self.switch.get_vlan(1750)

        assert_that(vlan.number, is_(1750))
        assert_that(vlan.ips, has_length(1))
        assert_that(vlan.icmp_redirects, equal_to(False))
        assert_that(vlan.vrrp_groups, has_length(1))

        vrrp_group1 = vlan.vrrp_groups[0]
        assert_that(len(vrrp_group1.ips), equal_to(1))
        assert_that(vrrp_group1.ips[0], equal_to(IPAddress('1.1.1.2')))
        assert_that(vrrp_group1.hello_interval, equal_to(4))
        assert_that(vrrp_group1.priority, equal_to(130))
        assert_that(vrrp_group1.track_id, equal_to('ethernet 1/3'))
        assert_that(vrrp_group1.track_decrement, equal_to(20))

        assert_that(len(vlan.dhcp_relay_servers), equal_to(2))
        assert_that(str(vlan.dhcp_relay_servers[0]), equal_to('10.10.10.1'))
        assert_that(str(vlan.dhcp_relay_servers[1]), equal_to('10.10.10.2'))

    def test_get_vlan_with_both_ip_and_ipv6_in_the_same_vrrp_group(self):
        self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return(
            vlan_with_vif_display(1750, 1750, name="Shizzle")
        )
        self.shell_mock.should_receive("do").with_args("show running-config interface ve 1750").once() \
            .ordered()\
            .and_return(['interface ve 1750',
                         'port-name vrrp-extended vrid 42',
                         ' ip address 10.241.0.33/27',
                         ' no ip redirect',
                         ' ip helper-address 10.10.10.1',
                         ' ip helper-address 10.10.10.2',
                         ' ipv6 address fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/64',
                         ' ipv6 address fdf8:f53e:61e4::18/64',
                         ' ipv6 nd suppress-ra',
                         ' ip vrrp-extended vrid 42',
                         '  backup priority 130 track-priority 20',
                         '  ip-address 1.1.1.2',
                         '  advertise backup',
                         '  hello-interval 4',
                         '  track-port ethernet 1/3',
                         '  activate',
                         ' ipv6 vrrp-extended vrid 42',
                         '  backup priority 170 track-priority 40',
                         '  ipv6-address fdf8:f53e:61e4::18',
                         '  advertise backup',
                         '  hello-interval 400',
                         '  track-port ethernet 4/6',
                         '  activate',
                         '!'])

        vlan = self.switch.get_vlan(1750)

        assert_that(vlan.number, is_(1750))
        assert_that(vlan.ips, has_length(1))
        assert_that(vlan.icmp_redirects, equal_to(False))

        vrrp_group = vlan.vrrp_groups[0]
        assert_that(len(vrrp_group.ips), equal_to(1))
        assert_that(vrrp_group.ips[0], equal_to(IPAddress('1.1.1.2')))
        assert_that(vrrp_group.hello_interval, equal_to(4))
        assert_that(vrrp_group.priority, equal_to(130))
        assert_that(vrrp_group.track_id, equal_to('ethernet 1/3'))
        assert_that(vrrp_group.track_decrement, equal_to(20))

    def test_add_vlan(self):
        self.shell_mock.should_receive("do").with_args("show vlan 2999").and_return([
            "Error: vlan 2999 is not configured"
        ])

        self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
        self.shell_mock.should_receive("do").with_args("vlan 2999 name Gertrude").and_return([]).once().ordered()
        self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()

        self.switch.add_vlan(2999, name="Gertrude")

    def test_add_vlan_bad_number(self):
        self.shell_mock.should_receive("do").with_args("show vlan 5000").and_return([
            "Error: vlan 5000 is not configured"
        ])

        self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
        self.shell_mock.should_receive("do").with_args("vlan 5000 name Gertrude").once().ordered().and_return([
            "Error: vlan id 4091 is outside of allowed max of 4090"
        ])
        self.shell_mock.should_receive("do").with_args("exit").once().ordered()

        with self.assertRaises(BadVlanNumber) as expect:
            self.switch.add_vlan(5000, name="Gertrude")

        assert_that(str(expect.exception), equal_to("Vlan number is invalid"))

    def test_add_vlan_bad_name(self):
        self.shell_mock.should_receive("do").with_args("show vlan 5000").and_return([
            "Error: vlan 5000 is not configured"
        ])

        self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
        self.shell_mock.should_receive("do").with_args("vlan 5000 name Gertr ude").once().ordered().and_return([
            "Invalid input -> ude"
        ])
        self.shell_mock.should_receive("do").with_args("exit").once().ordered()

        # NOTE(review): statement truncated at this chunk boundary.
        with self.assertRaises(BadVlanName)
# -*- coding: utf-8 -*- """ Created on Tue Dec 14 07:48:19 2021 @author: <NAME> """ import pandas as pd import numpy as np # Helper functions from preprocessing import process_text, convert_to_int, lda_preprocess from genre_processing import clean_genre, group_genre from pprint import pprint # BERTopic from sentence_transformers import SentenceTransformer from bertopic import BERTopic from umap import UMAP # LDA imports from gensim.corpora import Dictionary from gensim.models import LdaModel, Phrases, CoherenceModel import logging # Define classes class lda_model: def __init__(self, nr_topics=20, nr_passes=50): self.nr_topics = nr_topics self.nr_passes = nr_passes self.is_fitted = False def _create_lda_documents(docs): """ Convert the documents to a structure that aligns with the LdaModel. Parameters ---------- docs : pd.Series, np.array or similar one dimensional structure Contains the synopses of all the movies in the training data. Returns ------- documents : list of lists A structure to work with the LdaModel from gensim. """ # Preprocess the documents to work with the LDA model documents = lda_preprocess(docs) # Create bigrams bigram = Phrases(documents) for idx in range(len(documents)): for token in bigram[documents[idx]]: if '_' in token: # Token is a bigram, add to document. documents[idx].append(token) return documents def _convert_lda_to_df(self, model_preds): """ Save the predicted probabilites for each document to belong to a topic in a data frame. Parameters ---------- model_preds : list of tuples Structure as generated by predicting topics for new documents. Returns ------- lda_df : pd.DataFrame A data frame with all possible topic predictions and each documents probability of being in that topic. 
""" lda_df = pd.DataFrame(columns=range(self.nr_topics)) for doc in model_preds: # Convert list of tuple to a dataframe a = pd.DataFrame.from_dict({x[0]:x[1] for x in doc}, orient="index").T # Add extra columns so that it contains all topics a = a.reindex(columns=range(self.nr_topics), fill_value=0) # Append to the dataframe lda_df = lda_df.append(a) lda_df.reset_index(inplace=True, drop=True) return lda_df def create_dictionary(self, X): """ Create the dicationary, corpus needed for the LDA model and for coherence measures. Parameters ---------- X : pd.Series, np.array or similar one dimensional structure Contains the synopses of all movies to examine. Returns ------- documents : list The documents pre-processed to work with LdaModel. dictionary : gensim.corpora.dictionary.Dictionary Dictionary of all words and id mappings. corpus : list List of the documents as a bag of words. id2word : mapping A mapping from word id to the actual word. """ # Convert the input docs to LDA friendly documents documents = self._create_lda_documents(X) # Create a dictionary representation of the documents dictionary = Dictionary(documents) # Transform documents to a bag of words reprentation (vectorized form) corpus = [dictionary.doc2bow(doc) for doc in documents] # Make a index to word dictionary. temp = dictionary[0] # This is only to "load" the dictionary. id2word = dictionary.id2token # Update values in the object self.documents = documents self.dictionary = dictionary self.corpus = corpus self.id2word = id2word return documents, dictionary, corpus, id2word def fit(self, X): """ Fit the LdaModel as specifed by the input parameters. Also saves a logfile. # The code for the LDA model was inspired by the gensim documentation: # https://radimrehurek.com/gensim/auto_examples/tutorials/run_lda.html#pre-process-and-vectorize-the-documents Parameters ---------- X : pd.Series, np.array or similar one dimensional structure Contains the synopses of all the movies in the training data. 
Returns ------- None. """ try: # Attempt to load the model if one already exists model = LdaModel.load(f"../Models/gensim/model{self.nr_topics}_{self.nr_passes}pass") except FileNotFoundError: # Create the dictionary, corpus and id2word self.create_dictionary(X) # Create logging file logging.basicConfig(filename='gensim.log', format='%(asctime)s:%(levelname)s:%(message)s', level=logging.INFO) # Fit the model model = LdaModel( corpus=self.corpus, num_topics=self.nr_topics, passes=self.nr_passes, alpha="auto", eta="auto", id2word=self.id2word, random_state=0 ) # Save the resulting model model.save(f"../Models/gensim/model{self.nr_topics}_{self.nr_passes}pass") # Shutdown the logging file logging.shutdown() self.is_fitted = True # Save the resulting model in the object self.model = model def predict(self, X_test): """ Predict the topics for previously unseen documents. Parameters ---------- X_test : pd.Series, np.array or similar one dimensional structure Contains the synopses of all the movies in the training data. Returns ------- predicted_probs : pd.DataFrame A data frame consisting of the predicted probabilites of each topic for all documents in test_docs. """ try: # Try reading the file predicted_probs = pd.read_csv(f"../Output/gensim/lda{self.nr_topics}_df.csv") except FileNotFoundError: # Preprocess the documents to work with the model test_docs_lda = self._create_lda_documents(X_test) # Create test corpus test_corpus = [self.dictionary.doc2bow(doc) for doc in test_docs_lda] # Get the predicted probabilites of belonging to each topic model_preds = [doc for doc in self.model.get_document_topics(test_corpus)] # Get the predicted probabilites of belonging to each topic predicted_probs = self._convert_lda_to_df(model_preds, self.nr_topics) # Save to csv if it does not exist predicted_probs.to_csv(f"../Output/gensim/lda{self.nr_topics}_df.csv", index=False) return predicted_probs def get_topics_per_class(self, y): """ Count the number of topics per class occurrence. 
The topics are here generated by argmax, to contrast with BERTopic in which it is chosen from HDBSCAN. Parameters ---------- y : pd.Series, np.array or similar one dimensional structure The genre labels for the plots. Returns ------- model_labels : pd.DataFrame A data frame consisting of counts of class-topic combinations. """ try: # Try reading the files model_probs = pd.read_csv(f"../Output/gensim/model{self.nr_topics}_df.csv") model_labels = pd.read_csv(f"../Output/gensim/model{self.nr_topics}_labels.csv") except FileNotFoundError: # Convert the estimated probabilites for each topics (which may not) # include all topics to a data frame with all topics included model_probs = self._convert_lda_to_df([doc for doc in self.model.get_document_topics(self.corpus)], nr_topics=self.nr_topics) # If file doesn't exist: create it model_probs.to_csv(f"../Output/gensim/model{self.nr_topics}_df.csv", index=False) # Classify the topics by the maximum probability and calculate the # size of each combination of Class (truth) and Topic (predicted argmax) model_labels = pd.DataFrame([y, np.argmax(model_probs.values, axis=1)]).T.\ rename(columns={0: "Class", "Unnamed 0": "Topic"}).groupby(["Class", "Topic"]).\ size().reset_index().rename(columns={0: "Frequency"}) # If file doesn't exist: create it model_labels.to_csv(f"../Output/gensim/model{self.nr_topics}_labels.csv", index=False) return model_labels def coherence_score(self, X): """ Calculate coherence metric for LDA models using NPMI. Parameters ---------- X : pd.Series, np.array or similar one dimensional structure Contains the documents in the training data. Returns ------- c_npmi : float The coherence score for the generated topics. 
""" if not self.is_fitted: self.create_dictionary(X) # Calculate coherence score c_npmi = CoherenceModel(model=lda_base20.model, corpus=self.corpus, dictionary=self.dictionary, coherence='c_nmpi').get_coherence() return c_npmi class BERT_model: def __init__(self, min_topic_size=10): """ Create a new object of the custom BERT_model class used for this report. Parameters ---------- min_topic_size : int, the default is 10. The minimum size of the topics. Returns ------- None. """ # Path to the the BERT model bert_path = 'BERT/all-MiniLM-L6-v2' # Specify the embedding model self.sentence_model = SentenceTransformer(bert_path) # Specify UMAP model with specfied random state (otherwise default) self.umap_model = UMAP(n_neighbors=15, n_components=5, min_dist=0.0, metric='cosine', random_state=42) # Save min_topic_size self.min_topic_size = min_topic_size # Topic model with greater topic size and auto topic reduction self.topic_model = BERTopic(embedding_model=self.sentence_model, calculate_probabilities=True, n_gram_range=(1,2), min_topic_size=self.min_topic_size, nr_topics="auto", umap_model=self.umap_model) # Placholders self.probs = None self.topics = None def fit(self, X, y=None): """ Fit the transform on the given data, with or without a class label. Will attempt to read the files if they exist. Parameters ---------- X : pd.Series, np.array or similar one dimensional structure Contains the synopses of all the movies in the training data. y : pd.Series, np.array or similar one dimensional structure The genres of the movie synopses indexed by integers. The default is None. Returns ------- None. Instead the topic model is updated and probabilities and topics saved within the class. 
""" # Specify the model name self.model_name = "sup" if y is None: self.model_name= "unsup" try: # Attempt to read the files if y is None: # Unsupervised model topic_model = BERTopic.load(f"../Models/BERTopic/unsup_bigram_model_auto{self.min_topic_size}") topic_df = pd.read_csv(f"../Output/BERTopic/unsup_bigram_topics_auto{self.min_topic_size}.csv") else: # Supervised model topic_model = BERTopic.load(f"../Models/BERTopic/sup_bigram_model_auto{self.min_topic_size}") topic_df = pd.read_csv(f"../Output/BERTopic/sup_bigram_topics_auto{self.min_topic_size}.csv") # Split to corresponding entries topics = np.array(topic_df["topic"]) probs = np.array(topic_df.drop("topic", axis=1)) # Update the topic model self.topic_model = topic_model except FileNotFoundError: # If the file does not exist; create it if y is None: # Unsupervised model topics, probs = self.topic_model.fit_transform(X) self.topic_model.save(f"../Models/BERTopic/unsup_bigram_model_auto{self.min_topic_size}") pd.DataFrame(probs).assign(topic=topics).\ to_csv(f"../Output/BERTopic/unsup_bigram_topics_auto{self.min_topic_size}.csv", index=False) else: # Supervised